| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2009-2010 Craig J. Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Reports/Graphical Reports/Familial Tree
Reports/Graphical Reports/Personal Tree
"""
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import sgettext as _
from gramps.gen.errors import ReportError
from gramps.gen.lib import NoteType
from gramps.gen.plug.menu import TextOption
from gramps.gen.plug.menu import NumberOption
from gramps.gen.plug.menu import EnumeratedListOption
from gramps.gen.plug.menu import StringOption
from gramps.gen.plug.menu import BooleanOption
from gramps.gen.plug.menu import PersonOption
from gramps.gen.plug.menu import FamilyOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
PT2CM = ReportUtils.pt2cm
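#pt2cm converts a size in points to centimeters
#(1 pt = 1/72 inch, so a 9 pt font is roughly 0.32 cm).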
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
_BORN = _('short for born|b.')
_DIED = _('short for died|d.')
_MARR = _('short for married|m.')
_RPT_NAME = 'descend_chart'
from gramps.plugins.lib.libtreebase import *
#------------------------------------------------------------------------
#
# Box classes
#
#------------------------------------------------------------------------
class DescendantBoxBase(BoxBase):
"""
Base for all descendant boxes.
Set the boxstr and some new attributes that are needed
"""
def __init__(self, boxstr):
BoxBase.__init__(self)
self.boxstr = boxstr
self.next = None
self.father = None
def calc_text(self, database, person, family):
""" A single place to calculate box text """
gui = GuiConnect()
calc = gui.calc_lines(database)
self.text = calc.calc_lines(person, family,
gui.working_lines(self))
class PersonBox(DescendantBoxBase):
"""
Calculates information about the box that will print on a page
"""
def __init__(self, level, boldable = 0):
DescendantBoxBase.__init__(self, "CG2-box")
self.level = level
def set_bold(self):
""" update me to a bolded box """
self.boxstr = "CG2b-box"
class FamilyBox(DescendantBoxBase):
"""
Calculates information about the box that will print on a page
"""
def __init__(self, level):
DescendantBoxBase.__init__(self, "CG2-fam-box")
self.level = level
class PlaceHolderBox(BoxBase):
"""
    I am a box that does not print. I am used to make sure information
    (boxes) does not run over areas that we want to keep empty.
"""
def __init__(self, level):
BoxBase.__init__(self)
self.boxstr = "None"
self.level = level
self.line_to = None
self.next = None
def calc_text(self, database, person, family):
""" move along. Nothing to see here """
return
#------------------------------------------------------------------------
#
# Titles Class(es)
#
#------------------------------------------------------------------------
class DescendantTitleBase(TitleBox):
def __init__(self, dbase, doc, boxstr = "CG2-Title"):
TitleBox.__init__(self, doc, boxstr)
self.database = dbase
    def descendant_print(self, person_list, person_list2=None):
        """ calculate the Descendant title
        person_list will always be passed.
        If in the Family reports and there are two families, person_list2
        will be used.
        """
        person_list2 = person_list2 or []
        if len(person_list) == len(person_list2) == 1:
            person_list = person_list + person_list2
            person_list2 = []
names = self._get_names(person_list)
if person_list2:
names2 = self._get_names(person_list2)
if len(names) + len(names2) == 3:
if len(names) == 1:
title = _("Descendant Chart for %(person)s and "
"%(father1)s, %(mother1)s") % \
{'person': names[0],
'father1': names2[0],
'mother1': names2[1],
}
else: # Should be 2 items in names list
title = _("Descendant Chart for %(person)s, %(father1)s "
"and %(mother1)s") % \
{'father1': names[0],
'mother1': names[1],
'person': names2[0],
}
else: # Should be 2 items in both names and names2 lists
title = _("Descendant Chart for %(father1)s, %(father2)s "
"and %(mother1)s, %(mother2)s") % \
{'father1': names[0],
'mother1': names[1],
'father2': names2[0],
'mother2': names2[1],
}
else: # No person_list2: Just one family
if len(names) == 1:
title = _("Descendant Chart for %(person)s") % \
{'person': names[0]}
else: # Should be two items in names list
title = _("Descendant Chart for %(father)s and %(mother)s") % \
{'father': names[0],
'mother': names[1],
}
return title
def get_parents(self, family_id):
""" For a family_id, return the father and mother """
family1 = self.database.get_family_from_gramps_id(family_id)
father_h = family1.get_father_handle()
mother_h = family1.get_mother_handle()
parents = [self.database.get_person_from_handle(handle)
for handle in [father_h, mother_h] if handle]
return parents
class TitleNone(TitleNoDisplay):
"""Family Chart Title class for the report """
def __init__(self, dbase, doc):
TitleNoDisplay.__init__(self, doc, "CG2-Title")
def calc_title(self, persons):
"""Calculate the title of the report"""
self.text = 'Descendant Graph'
class TitleDPY(DescendantTitleBase):
"""Descendant (Person yes start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, person_id):
"""Calculate the title of the report"""
center = self.database.get_person_from_gramps_id(person_id)
family2_h = center.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
person_list = None
if family2:
father2_h = family2.get_father_handle()
mother2_h = family2.get_mother_handle()
person_list = [self.database.get_person_from_handle(handle)
for handle in [father2_h, mother2_h] if handle]
if not person_list:
person_list = [center]
self.text = self.descendant_print(person_list)
self.set_box_height_width()
class TitleDPN(DescendantTitleBase):
"""Descendant (Person no start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, person_id):
"""Calculate the title of the report"""
center = self.database.get_person_from_gramps_id(person_id)
title = self.descendant_print([center])
self.text = title
self.set_box_height_width()
class TitleDFY(DescendantTitleBase):
"""Descendant (Family yes start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def get_parent_list(self, person):
""" return a list of my parents. If none, return me """
if not person:
return None
parent_list = None
family_h = person.get_main_parents_family_handle()
family = self.database.get_family_from_handle(family_h)
        if family: #family = the person's parents
father_h = family.get_father_handle()
mother_h = family.get_mother_handle()
parent_list = [self.database.get_person_from_handle(handle)
for handle in [father_h, mother_h] if handle]
return parent_list or [person]
def calc_title(self, family_id):
"""Calculate the title of the report"""
my_parents = self.get_parents(family_id)
dad_parents = self.get_parent_list(my_parents[0])
mom_parents = []
if len(my_parents) > 1:
if not dad_parents:
dad_parents = self.get_parent_list(my_parents[1])
else:
mom_parents = self.get_parent_list(my_parents[1])
self.text = self.descendant_print(dad_parents, mom_parents)
self.set_box_height_width()
class TitleDFN(DescendantTitleBase):
"""Descendant (Family no start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, family_id):
"""Calculate the title of the report"""
self.text = self.descendant_print(
self.get_parents(family_id) )
self.set_box_height_width()
class TitleF(DescendantTitleBase):
"""Family Chart Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, family_id):
"""Calculate the title of the report"""
parents = self.get_parents(family_id)
names = self._get_names(parents)
if len(parents) == 1:
title = _("Family Chart for %(person)s") % {'person': names[0] }
elif len(parents) == 2:
title = _("Family Chart for %(father1)s and %(mother1)s") % \
{'father1': names[0], 'mother1': names[1] }
#else:
# title = str(tmp) + " " + str(len(tmp))
self.text = title
self.set_box_height_width()
class TitleC(DescendantTitleBase):
"""Cousin Chart Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, family_id):
"""Calculate the title of the report"""
family = self.database.get_family_from_gramps_id(family_id)
kids = [self.database.get_person_from_handle(kid.ref)
for kid in family.get_child_ref_list()]
#ok we have the children. Make a title off of them
        names = self._get_names(kids)
        self.text = _("Cousin Chart for %s") % ", ".join(names)
self.set_box_height_width()
#------------------------------------------------------------------------
#
# Class RecurseDown
#
#------------------------------------------------------------------------
class RecurseDown:
"""
The main recursive functions that will use add_person to make
the tree of people (Descendants) to be included within the report.
"""
def __init__(self, dbase, canvas):
self.database = dbase
self.canvas = canvas
self.families_seen = set()
self.cols = []
self.__last_direct = []
gui = GuiConnect()
self.do_parents = gui.get_val('show_parents')
self.max_generations = gui.get_val('maxgen')
self.max_spouses = gui.get_val('maxspouse')
self.inlc_marr = gui.get_val("inc_marr")
if not self.max_spouses:
self.inlc_marr = False
#is the option even available?
self.bold_direct = gui.get_val('bolddirect')
#can we bold direct descendants?
#bold_now will have only three values
#0 - no bolding
#1 - Only bold the first person
#2 - Bold all direct descendants
self.bold_now = 0
gui = None
def add_to_col(self, box):
"""
        Add the box to a column on the canvas. We will do these things:
        set the .next attribute for the boxes in this column,
        get the height and width of this box and set them on the column,
        and set .x_cm from the s_level (spouse indentation) for now;
        the real .x_cm will be calculated later (with generation indentation).
"""
level = box.level[0]
#make the column list of people
while len(self.cols) <= level:
self.cols.append(None)
self.__last_direct.append(None)
if self.cols[level]: #if (not the first box in this column)
last_box = self.cols[level]
last_box.next = box
#calculate the .y_cm for this box.
box.y_cm = last_box.y_cm
box.y_cm += last_box.height
if last_box.boxstr in ["CG2-box", "CG2b-box"]:
box.y_cm += self.canvas.report_opts.box_shadow
if box.boxstr in ["CG2-box", "CG2b-box"]:
box.y_cm += self.canvas.report_opts.box_pgap
else:
box.y_cm += self.canvas.report_opts.box_mgap
if box.level[1] == 0 and self.__last_direct[level]:
#ok, a new direct descendant.
#print level, box.father is not None, self.__last_direct[level].father is not None, box.text[0], \
# self.__last_direct[level].text[0]
if box.father != self.__last_direct[level].father and \
box.father != self.__last_direct[level]:
box.y_cm += self.canvas.report_opts.box_pgap
self.cols[level] = box
if box.level[1] == 0:
self.__last_direct[level] = box
box.x_cm = self.canvas.report_opts.spouse_offset * box.level[1]
self.canvas.set_box_height_width(box)
def add_person_box(self, level, indi_handle, fams_handle, father):
""" Makes a person box and add that person into the Canvas. """
myself = PersonBox(level)
myself.father = father
if myself.level[1] == 0 and self.bold_direct and self.bold_now:
if self.bold_now == 1:
self.bold_now = 0
myself.set_bold()
if level[1] == 0 and father and myself.level[0] != father.level[0]:
#I am a child
if father.line_to:
line = father.line_to
else:
line = LineBase(father)
father.line_to = line
#self.canvas.add_line(line)
line.end.append(myself)
#calculate the text.
myself.calc_text(self.database, indi_handle, fams_handle)
myself.add_mark(self.database,
self.database.get_person_from_handle(indi_handle))
self.add_to_col(myself)
self.canvas.add_box(myself)
return myself
def add_marriage_box(self, level, indi_handle, fams_handle, father):
""" Makes a marriage box and add that person into the Canvas. """
myself = FamilyBox(level)
#if father is not None:
# myself.father = father
#calculate the text.
myself.calc_text(self.database, indi_handle, fams_handle)
self.add_to_col(myself)
self.canvas.add_box(myself)
return myself
def recurse(self, person_handle, x_level, s_level, father):
"""traverse the ancestors recursively until
either the end of a line is found,
or until we reach the maximum number of generations
or we reach the max number of spouses
that we want to deal with"""
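        #Rough sketch of one pass (assuming max_spouses == 1 and
        #inc_marr on): person (s_level 0) -> marriage box -> spouse box ->
        #each child at (x_level+1, 0) -> then the spouse's own other
        #families via self.recurse(spouse_handle, x_level, s_level+1, spouse).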
if not person_handle: return
if x_level > self.max_generations: return
if s_level > 0 and s_level == self.max_spouses: return
if person_handle in self.families_seen: return
myself = None
person = self.database.get_person_from_handle(person_handle)
family_handles = person.get_family_handle_list()
if s_level == 0:
val = family_handles[0] if family_handles else None
myself = self.add_person_box( (x_level, s_level),
person_handle, val, father)
marr = None
spouse = None
if s_level == 1:
tmp_bold = self.bold_now
self.bold_now = 0
for family_handle in family_handles:
if family_handle not in self.families_seen:
self.families_seen.add(family_handle)
family = self.database.get_family_from_handle(family_handle)
#Marriage box if the option is there.
if self.inlc_marr and self.max_spouses > 0:
marr = self.add_marriage_box((x_level, s_level+1),
person_handle, family_handle,
father if s_level else myself)
spouse_handle = ReportUtils.find_spouse(person, family)
if self.max_spouses > s_level and \
spouse_handle not in self.families_seen:
def _spouse_box(who):
return self.add_person_box((x_level, s_level+1),
spouse_handle, family_handle, who)
if s_level > 0:
spouse = _spouse_box(father)
elif self.inlc_marr:
spouse = _spouse_box(marr)
else:
spouse = _spouse_box(myself)
mykids = [kid.ref for kid in family.get_child_ref_list()]
def _child_recurse(who):
self.recurse(child_ref, x_level+1, 0, who)
for child_ref in mykids:
if self.inlc_marr and self.max_spouses > 0:
_child_recurse(marr)
elif spouse:
_child_recurse(spouse)
else:
_child_recurse(myself)
if self.max_spouses > s_level and \
spouse_handle not in self.families_seen:
#spouse_handle = ReportUtils.find_spouse(person,family)
self.recurse(spouse_handle, x_level, s_level+1, spouse)
if s_level == 1:
self.bold_now = tmp_bold
def add_family(self, level, family, father2):
"""
        Add a family to the canvas.
        Will only be used for my parents' and my direct
        grandparents' families.
"""
family_h = family.get_handle()
father_h = family.get_father_handle()
mother_h = family.get_mother_handle()
self.bold_now = 2
if father_h:
father_b = self.add_person_box(
(level, 0), father_h, family_h, father2)
else:
father_b = self.add_person_box(
(level, 0), None, None, father2)
retrn = [father_b]
if self.inlc_marr:
family_b = self.add_marriage_box(
(level, 1), father_h, family_h, father_b)
retrn.append(family_b)
self.families_seen.add(family_h)
if mother_h:
mother_b = self.add_person_box(
(level, 0), mother_h, family_h, father_b)
else:
mother_b = self.add_person_box(
(level, 0), None, None, father_b)
retrn.append(mother_b)
family_line = family_b if self.inlc_marr else father_b
for child_ref in family.get_child_ref_list():
self.recurse(child_ref.ref, level+1, 0, family_line)
self.bold_now = 0
#Set up the lines for the family
if not family_line.line_to:
#no children.
family_line.line_to = LineBase(family_line)
if self.inlc_marr:
family_line.line_to.start.append(father_b)
family_line.line_to.start.append(mother_b)
return retrn
def has_children(self, person_handle):
"""
        Quickly check to see if this person has children,
        while still respecting the families_seen set.
"""
if not person_handle or person_handle in self.families_seen:
return False
person = self.database.get_person_from_handle(person_handle)
for family_handle in person.get_family_handle_list():
if family_handle not in self.families_seen:
family = self.database.get_family_from_handle(family_handle)
if family.get_child_ref_list():
return True
return False
def recurse_if(self, person_handle, level):
"""
        Quickly check to see if we want to continue recursion,
        while still respecting the families_seen set.
"""
person = self.database.get_person_from_handle(person_handle)
show = False
myfams = person.get_family_handle_list()
if len(myfams) > 1: #and self.max_spouses > 0
show = True
if not self.inlc_marr:
#if the condition is true, we only want to show
#this parent again IF s/he has other children
show = self.has_children(person_handle)
#if self.max_spouses == 0 and not self.has_children(person_handle):
# self.families_seen.add(person_handle)
# show = False
if show:
self.bold_now = 1
self.recurse(person_handle, level, 0, None)
#------------------------------------------------------------------------
#
# Class MakePersonTree (Personal Descendant Tree option)
#
#------------------------------------------------------------------------
class MakePersonTree(RecurseDown):
"""
    The main procedure that uses recursion to make the tree based on a person.
    The order in which people are inserted into the canvas is important;
    this class makes sure that order is done correctly.
"""
def __init__(self, dbase, canvas):
RecurseDown.__init__(self, dbase, canvas)
self.max_generations -= 1
def start(self, person_id):
"""follow the steps to make a tree off of a person"""
persons = []
center1 = self.database.get_person_from_gramps_id(person_id)
if center1 is None:
raise ReportError(_("Person %s is not in the Database") % person_id)
center1_h = center1.get_handle() #could be mom too.
family2 = family2_h = None
if self.do_parents:
family2_h = center1.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
mother2_h = father2_h = None
if family2:
father2_h = family2.get_father_handle()
mother2_h = family2.get_mother_handle()
#######################
#don't do center person's parents family.
if family2_h:
self.families_seen.add(family2_h)
#######################
        #Center person's father's OTHER wives
#######################
#update to only run if he HAD other wives!
if father2_h:
self.recurse_if(father2_h, 0)
#######################
        #Center person's parents only!
        #######################
        #now it will ONLY be my father's parents
if family2:
self.add_family( 0, family2, None )
else:
self.bold_now = 2
self.recurse(center1_h, 0, 0, None)
self.bold_now = 0
#######################
        #Center person's mother's OTHER husbands
#######################
#update to only run if she HAD other husbands!
if mother2_h:
self.recurse_if(mother2_h, 0)
return persons
#------------------------------------------------------------------------
#
# Class MakeFamilyTree (Familial Descendant Tree option)
#
#------------------------------------------------------------------------
class MakeFamilyTree(RecurseDown):
"""
    The main procedure that uses recursion to make the tree based on a family.
    The order in which people are inserted into the canvas is important;
    this class makes sure that order is done correctly.
"""
def __init__(self, dbase, canvas):
RecurseDown.__init__(self, dbase, canvas)
def start(self, family_id):
"""follow the steps to make a tree off of a family"""
        ## (my) refers to the children of family_id
        # Step 1: print out my father's father's
        # other wives' families first (if needed)
family1 = self.database.get_family_from_gramps_id(family_id)
if family1 is None:
raise ReportError(_("Family %s is not in the Database") % family_id)
family1_h = family1.get_handle()
#######################
#Initial setup of variables
#######################
father1_h = family1.get_father_handle()
mother1_h = family1.get_mother_handle()
father1 = mother1 = family2 = family2_h = None
if father1_h:
father1 = self.database.get_person_from_handle(father1_h)
if self.do_parents: #b3 - remove grandparents?
family2_h = father1.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
if mother1_h:
mother1 = self.database.get_person_from_handle(mother1_h)
mother2_h = father2_h = None
if family2: #family2 = fathers parents
mother2_h = family2.get_mother_handle()
mother2 = self.database.get_person_from_handle(mother2_h)
father2_h = family2.get_father_handle()
father2 = self.database.get_person_from_handle(father2_h)
#Helper variables. Assigned in one section, used in another.
father2_id = family2_id = None
mother1_id = None
#######################
#don't do my fathers parents family. will be done later
if family2_h:
self.families_seen.add(family2_h)
#######################
        #my father's mother's OTHER husbands
#######################
#update to only run if she HAD other husbands!
if mother2_h:
self.recurse_if(mother2_h, 0)
#######################
        #my father's father's OTHER wives
#######################
#update to only run if he HAD other wives!
if father2_h:
self.recurse_if(father2_h, 0)
#######################
#don't do my parents family in recurse. will be done later
self.families_seen.add(family1_h)
##If dad has no other children from other marriages. remove him
if self.max_spouses == 0 and not self.has_children(father1_h):
self.families_seen.add(father1_h)
#######################
#my fathers parents!
#######################
        #now it will ONLY be my father's parents
        #will print dad's parents. dad's other wives will also print
if family2:
myfams = father1.get_family_handle_list()
show = False
if len(myfams) > 1:
show = True
if not self.inlc_marr and self.max_spouses == 0:
#if the condition is true, we only want to show
#this parent again IF s/he has children
show = self.has_children(father1_h)
if not show:
self.families_seen.add(father1_h)
family2_l = self.add_family( 0, family2, None )
elif father1:
#######################
        #my father's other wives (if all of the above does nothing)
#if my father does not have parents (he is the highest)
#######################
#do his OTHER wives first.
self.recurse_if(father1_h, 1)
#######################
#my father, marriage info, mother, siblings, me
#######################
if family2:
#We need to add dad to the family
family2_line = family2_l[1] if self.inlc_marr else family2_l[0]
else:
family2_line = None
family1_l = self.add_family(1, family1, family2_line)
mother1_b = family1_l[-1] #Mom's Box
#make sure there is at least one child in this family.
#if not put in a placeholder
family1_line = family1_l[1] if self.inlc_marr else family1_l[0]
if family1_line.line_to.end == []:
box = PlaceHolderBox((mother1_b.level[0]+1, 0))
box.father = family1_l[0]
self.add_to_col(box)
family1_line.line_to.end = [box]
#######################
#######################
#Lower half
#This will be quite like the first half.
#Just on the mothers side...
#Mom has already been printed with the family
#######################
#######################
#######################
#Initial setup of variables
#######################
mother1_h = family1.get_mother_handle()
family2_h = mother1 = family2 = None
if mother1_h:
mother1 = self.database.get_person_from_handle(mother1_h)
if self.do_parents: #b3 - remove grandparents?
family2_h = mother1.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
mother2_h = father2_h = None
if family2:
mother2_h = family2.get_mother_handle()
mother2 = self.database.get_person_from_handle(mother2_h)
father2_h = family2.get_father_handle()
father2 = self.database.get_person_from_handle(father2_h)
#######################
#don't do my parents family.
self.families_seen = set([family1_h] )
##If mom has no other children from other marriages. remove her
if self.max_spouses == 0 and not self.has_children(mother1_h):
self.families_seen.add(mother1_h)
if mother1_h:
myfams = mother1.get_family_handle_list()
if len(myfams) < 2:
                #If mom didn't have any other families, don't even do her;
                #she is already here with dad and will be added later
self.families_seen.add(mother1_h)
#######################
        #my mother's other spouses (if no parents)
#######################
#if my mother does not have parents (she is the highest)
#Then do her OTHER spouses.
if not family2 and mother1:
self.recurse_if(mother1_h, 1)
#######################
        #my mother's parents!
#######################
if family2:
family2_l = self.add_family( 0, family2, None )
family2_line = family2_l[1] if self.inlc_marr else family2_l[0]
family2_line = family2_line.line_to
if family2_line.end != []:
family2_line.end.insert(0, mother1_b)
else:
family2_line.end = [mother1_b]
            #FIXME: Mom's siblings have been given an extra space
            #because mom's father is not the siblings' father right now.
mother1_b.father = family2_line
#######################
        #my mother's mother's OTHER husbands
#######################
#update to only run if she HAD other husbands!
if mother2_h:
self.recurse_if(mother2_h, 0)
#######################
        #my mother's father's OTHER wives
#######################
#update to only run if he HAD other wives!
if father2_h:
self.recurse_if(father2_h, 0)
#------------------------------------------------------------------------
#
# Class MakeReport
#
#------------------------------------------------------------------------
class MakeReport(object):
"""
Make a report out of a list of people.
The list of people is already made. Use this information to find where
people will be placed on the canvas.
"""
def __init__(self, dbase, canvas, ind_spouse, compress_tree):
self.database = dbase
self.canvas = canvas
gui = GuiConnect()
self.do_parents = gui.get_val('show_parents')
self.inlc_marr = gui.get_val("inc_marr")
self.max_spouses = gui.get_val('maxspouse')
gui = None
self.ind_spouse = ind_spouse
self.compress_tree = compress_tree
self.cols = [[]]
#self.max_generations = 0
#already done in recurse,
#Some of this code needs to be moved up to RecurseDown.add_to_col()
def calc_box(self, box):
""" calculate the max_box_width and max_box_height for the report """
width = box.x_cm + box.width
if width > self.canvas.report_opts.max_box_width:
self.canvas.report_opts.max_box_width = width
if box.height > self.canvas.report_opts.max_box_height:
self.canvas.report_opts.max_box_height = box.height
while len(self.cols) <= box.level[0]:
self.cols.append([])
self.cols[box.level[0]].append(box)
#tmp = box.level[0]
#if tmp > self.max_generations:
# self.max_generations = tmp
def __move_col_from_here_down(self, box, amount):
"""Move me and everyone below me in this column only down"""
while box:
box.y_cm += amount
box = box.next
def __move_next_cols_from_here_down(self, box, amount):
"""Move me, everyone below me in this column,
and all of our children (and childrens children) down."""
col = [box]
while col:
if len(col) == 1 and col[0].line_to:
col.append(col[0].line_to.end[0])
col[0].y_cm += amount
col[0] = col[0].next
if col[0] is None:
col.pop(0)
def __next_family_group(self, box):
""" a helper function. Assume box is at the start of a family block.
get this family block. """
while box:
left_group = []
line = None
#Form the parental (left) group.
#am I a direct descendant?
if box.level[1] == 0:
#I am the father/mother.
left_group.append(box)
if box.line_to:
line = box.line_to
box = box.next
if box and box.level[1] != 0 and self.inlc_marr:
#add/start with the marriage box
left_group.append(box)
if box.line_to:
line = box.line_to
box = box.next
if box and box.level[1] != 0 and self.max_spouses > 0:
#add/start with the spousal box
left_group.append(box)
if box.line_to:
line = box.line_to
box = box.next
if line:
if len(line.start) > 1 and line.start[-1].level[1] == 0:
#a dad and mom family from RecurseDown.add_family. add mom
left_group.append(line.start[-1])
box = box.next
#we now have everyone we want
return left_group, line.end
#else
# no children, so no family. go again until we find one to return.
return None, None
def __reverse_family_group(self):
""" go through the n-1 to 0 cols of boxes looking for families
(parents with children) that may need to be moved. """
for x_col in range(len(self.cols)-1, -1, -1):
box = self.cols[x_col][0] #The first person in this col
while box:
left_group, right_group = self.__next_family_group(box)
if not left_group:
box = None #we found the end of this col
else:
yield left_group, right_group
box = left_group[-1].next
def __calc_movements(self, left_group, right_group):
""" for a family group, see if parents or children need to be
moved down so everyone is the the right/left of each other.
return a right y_cm and a left y_cm. these points will be used
to move parents/children down.
"""
left_up = left_group[0].y_cm
right_up = right_group[0].y_cm
left_center = left_up
right_center = right_up
if self.compress_tree:
#calculate a new left and right move points
for left_line in left_group:
if left_line.line_to:
break
left_center = left_line.y_cm + (left_line.height /2)
left_down = left_group[-1].y_cm + left_group[-1].height
right_down = right_group[-1].y_cm + right_group[-1].height
#Lazy. Move down either side only as much as we NEED to.
if left_center < right_up:
right_center = right_group[0].y_cm
elif left_up == right_up:
left_center = left_up #Lets keep it. top line.
elif left_center > right_down:
right_center = right_down
else:
right_center = left_center
return right_center, left_center
def Make_report(self):
"""
Everyone on the page is as far up as they can go.
Move them down to where they belong.
We are going to go through everyone from right to left
top to bottom moving everyone down as needed to make the report.
"""
seen_parents = False
for left_group, right_group in self.__reverse_family_group():
right_y_cm, left_y_cm = self.__calc_movements(left_group,
right_group)
            #1. Are my children too high? If so, move them down!
if right_y_cm < left_y_cm:
#we have to push our kids (and their kids) down.
#We also need to push down all the kids (under)
#these kids (in their column)
amt = (left_y_cm - right_y_cm)
self.__move_next_cols_from_here_down(right_group[0], amt)
#2. Am I (and spouses) too high? if so move us down!
elif left_y_cm < right_y_cm:
#Ok, I am too high. Move me down
amt = (right_y_cm - left_y_cm)
self.__move_col_from_here_down(left_group[0], amt)
#6. now check to see if we are working with dad and mom.
#if so we need to move down marriage information
#and mom!
left_line = left_group[0].line_to
if not left_line:
left_line = left_group[1].line_to
#left_line = left_line.start
if len(left_line.start) > 1 and not seen_parents:
#only do Dad and Mom. len(left_line) > 1
seen_parents = True
mom_cm = left_group[-1].y_cm + left_group[-1].height/2
last_child_cm = right_group[-1].y_cm
if not self.compress_tree:
last_child_cm += right_group[-1].height/2
move_amt = last_child_cm - mom_cm
                #if mom's height is less than the last child's height.
#The 0.2 is to see if this is even worth it.
if move_amt > 0.2:
#our children take up more space than us parents.
#so space mom out!
self.__move_col_from_here_down(left_group[-1], move_amt)
#move marriage info
if self.inlc_marr:
left_group[1].y_cm += move_amt/2
if left_line.end[0].boxstr == 'None':
left_line.end = []
def start(self):
"""Make the report"""
#for person in self.persons.depth_first_gen():
for box in self.canvas.boxes:
self.calc_box(box)
#At this point we know everything we need to make the report.
#Width of each column of people - self.rept_opt.box_width
#width of each column (or row) of lines - self.rept_opt.col_width
if not self.cols[0]:
#We wanted to print parents of starting person/family but
#there were none!
#remove column 0 and move everyone back one level
self.cols.pop(0)
for box in self.canvas.boxes:
box.level = (box.level[0] - 1, box.level[1])
#go ahead and set it now.
width = self.canvas.report_opts.max_box_width
for box in self.canvas.boxes:
box.width = width - box.x_cm
box.x_cm += self.canvas.report_opts.littleoffset
box.x_cm += (box.level[0] *
(self.canvas.report_opts.col_width +
self.canvas.report_opts.max_box_width))
box.y_cm += self.canvas.report_opts.littleoffset
box.y_cm += self.canvas.title.height
self.Make_report()
class GuiConnect():
""" This is a BORG object. There is ONLY one.
    This gives some common routines that EVERYONE can use, like
    getting the value of a GUI variable.
"""
__shared_state = {}
def __init__(self): #We are BORG!
self.__dict__ = self.__shared_state
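        #Borg sketch: every instance rebinds its __dict__ to the one
        #shared dict, so options stored via set__opts() on any instance
        #are visible from every other GuiConnect() in the report.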
def set__opts(self, options, which):
self._opts = options
self._which_report = which.split(",")[0]
def get_val(self, val):
""" Get a GUI value. """
value = self._opts.get_option_by_name(val)
if value:
return value.get_value()
else:
            return False
def Title_class(self, database, doc):
Title_type = self.get_val('report_title')
if Title_type == 0: #None
return TitleNone(database, doc)
if Title_type == 1: #Descendant Chart
if self._which_report == _RPT_NAME:
if self.get_val('show_parents'):
return TitleDPY(database, doc)
else:
return TitleDPN(database, doc)
else:
if self.get_val('show_parents'):
return TitleDFY(database, doc)
else:
return TitleDFN(database, doc)
if Title_type == 2:
return TitleF(database, doc)
else: #Title_type == 3
return TitleC(database, doc)
def Make_Tree(self, database, canvas):
if self._which_report == _RPT_NAME:
return MakePersonTree(database, canvas)
else:
return MakeFamilyTree(database, canvas)
def calc_lines(self, database):
#calculate the printed lines for each box
display_repl = self.get_val("replace_list")
#str = ""
#if self.get_val('miss_val'):
# str = "_____"
return CalcLines(database, display_repl)
def working_lines(self, box):
display = self.get_val("descend_disp")
#if self.get_val('diffspouse'):
display_spou = self.get_val("spouse_disp")
#else:
# display_spou = display
display_marr = [self.get_val("marr_disp")]
        if box.boxstr == "CG2-fam-box":
workinglines = display_marr
elif box.level[1] > 0 or (box.level[0] == 0 and box.father):
workinglines = display_spou
else:
workinglines = display
return workinglines
#------------------------------------------------------------------------
#
# DescendTree
#
#------------------------------------------------------------------------
class DescendTree(Report):
def __init__(self, database, options, user):
"""
Create DescendTree object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
"""
Report.__init__(self, database, options, user)
self.options = options
self.database = database
""" make the report in its full size and pages to print on
scale one or both as needed/desired.
"""
database = self.database
self.Connect = GuiConnect()
self.Connect.set__opts(self.options.menu, self.options.name)
style_sheet = self.doc.get_style_sheet()
font_normal = style_sheet.get_paragraph_style("CG2-Normal").get_font()
#The canvas that we will put our report on and print off of
self.canvas = Canvas(self.doc,
ReportOptions(self.doc, font_normal, "CG2-line"))
self.canvas.report_opts.box_shadow *= \
self.Connect.get_val('shadowscale')
self.canvas.report_opts.box_pgap *= self.Connect.get_val('box_Yscale')
self.canvas.report_opts.box_mgap *= self.Connect.get_val('box_Yscale')
center_id = self.Connect.get_val('pid')
#make the tree
tree = self.Connect.Make_Tree(database, self.canvas)
tree.start(center_id)
tree = None
#Title
title = self.Connect.Title_class(database, self.doc)
title.calc_title(center_id)
self.canvas.add_title(title)
#make the report as big as it wants to be.
ind_spouse = self.Connect.get_val("ind_spouse")
compress_tree = self.Connect.get_val('compress_tree')
report = MakeReport(database, self.canvas, ind_spouse, compress_tree)
report.start()
report = None
#note?
if self.Connect.get_val("inc_note"):
note_box = NoteBox(self.doc, "CG2-note-box",
self.Connect.get_val("note_place"))
subst = SubstKeywords(self.database, None, None)
note_box.text = subst.replace_and_clean(
self.Connect.get_val('note_disp'))
self.canvas.add_note(note_box)
#Now we have the report in its full size.
#Do we want to scale the report?
one_page = self.Connect.get_val("resize_page")
scale_report = self.Connect.get_val("scale_tree")
scale = self.canvas.scale_report(one_page,
scale_report != 0, scale_report == 2)
if scale != 1 or self.Connect.get_val('shadowscale') != 1.0:
self.scale_styles(scale)
def write_report(self):
""" Canvas now has everyone ready to print. Get some misc stuff
together and print. """
one_page = self.Connect.get_val("resize_page")
scale_report = self.Connect.get_val("scale_tree")
#Inlc_marr = self.Connect.get_val("inc_marr")
inc_border = self.Connect.get_val('inc_border')
incblank = self.Connect.get_val("inc_blank")
prnnum = self.Connect.get_val("inc_pagenum")
#ind_spouse = self.Connect.get_val("ind_spouse")
lines = self.Connect.get_val('note_disp')
#####################
#Setup page information
colsperpage = self.doc.get_usable_width()
colsperpage += self.canvas.report_opts.col_width
tmp = self.canvas.report_opts.max_box_width
tmp += self.canvas.report_opts.col_width
colsperpage = int(colsperpage / tmp)
colsperpage = colsperpage or 1
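        #Worked example (hypothetical numbers): with 19.0 cm of usable
        #width, col_width 0.5 cm and max_box_width 6.0 cm, this gives
        #int((19.0 + 0.5) / (6.0 + 0.5)) = 3 columns of boxes per page.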
#####################
#Vars
#p = self.doc.get_style_sheet().get_paragraph_style("CG2-Normal")
#font = p.get_font()
if prnnum:
page_num_box = PageNumberBox(self.doc, 'CG2-box')
#####################
#ok, everyone is now ready to print on the canvas. Paginate?
self.canvas.sort_boxes_on_y_cm()
self.canvas.paginate(colsperpage, one_page)
#####################
#Yeah!!!
#lets finally make some pages!!!
#####################
for page in self.canvas.page_iter_gen(incblank):
self.doc.start_page()
#do we need to print a border?
if inc_border:
page.draw_border('CG2-line')
#Do we need to print the page number?
if prnnum:
page_num_box.display(page)
page.display()
self.doc.end_page()
def scale_styles(self, amount):
"""
Scale the styles for this report. This must be done in the constructor.
"""
style_sheet = self.doc.get_style_sheet()
graph_style = style_sheet.get_draw_style("CG2-fam-box")
graph_style.set_shadow(graph_style.get_shadow(), 0)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2-fam-box", graph_style)
graph_style = style_sheet.get_draw_style("CG2-box")
graph_style.set_shadow(graph_style.get_shadow(),
self.canvas.report_opts.box_shadow * amount)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2-box", graph_style)
graph_style = style_sheet.get_draw_style("CG2b-box")
graph_style.set_shadow(graph_style.get_shadow(),
self.canvas.report_opts.box_shadow * amount)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2b-box", graph_style)
graph_style = style_sheet.get_draw_style("CG2-note-box")
graph_style.set_shadow(graph_style.get_shadow(), 0)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2-note-box", graph_style)
para_style = style_sheet.get_paragraph_style("CG2-Title")
font = para_style.get_font()
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Title", para_style)
para_style = style_sheet.get_paragraph_style("CG2-Normal")
font = para_style.get_font()
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Normal", para_style)
para_style = style_sheet.get_paragraph_style("CG2-Bold")
font = para_style.get_font()
font.set_bold(True)
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Bold", para_style)
para_style = style_sheet.get_paragraph_style("CG2-Note")
font = para_style.get_font()
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Note", para_style)
self.doc.set_style_sheet(style_sheet)
#------------------------------------------------------------------------
#
# DescendTreeOptions
#
#------------------------------------------------------------------------
class DescendTreeOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__pid = None
self.__onepage = None
self.__inc_title = None
self.__title = None
self.__blank = None
self.scale = None
self.__db = dbase
self.name = name
self.box_Y_sf = None
self.box_shadow_sf = None
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the descendant report.
"""
##################
category_name = _("Tree Options")
if self.name.split(",")[0] == _RPT_NAME:
self.__pid = PersonOption(_("Report for"))
self.__pid.set_help(_("The main person for the report"))
menu.add_option(category_name, "pid", self.__pid)
else: #if self.name == "familial_descend_tree":
self.__pid = FamilyOption(_("Report for"))
self.__pid.set_help(_("The main family for the report"))
menu.add_option(category_name, "pid", self.__pid)
self.showparents = BooleanOption(
_('Start with the parent(s) of the selected first'),
False)
self.showparents.set_help(
_("Will show the parents, brother and sisters of the "
"selected person.")
)
menu.add_option(category_name, "show_parents", self.showparents)
max_gen = NumberOption(_("Generations"), 10, 1, 50)
max_gen.set_help(_("The number of generations to include in the tree"))
menu.add_option(category_name, "maxgen", max_gen)
max_spouse = NumberOption(_("Level of Spouses"), 1, 0, 10)
max_spouse.set_help(_("0=no Spouses, 1=include Spouses, 2=include "
"Spouses of the spouse, etc"))
menu.add_option(category_name, "maxspouse", max_spouse)
compresst = BooleanOption(_('Co_mpress tree'), False)
compresst.set_help(_("Whether to move people up, where possible, "
"resulting in a smaller tree"))
menu.add_option(category_name, "compress_tree", compresst)
##################
category_name = _("Display")
disp = TextOption(_("Descendant\nDisplay Format"),
["$n",
"%s $b" %_BORN,
"{%s $d}" %_DIED])
disp.set_help(_("Display format for a descendant."))
menu.add_option(category_name, "descend_disp", disp)
bold = BooleanOption(_('Bold direct descendants'), True)
bold.set_help(
_("Whether to bold those people that are direct "
"(not step or half) descendants.")
)
menu.add_option(category_name, "bolddirect", bold)
#bug 4767
#diffspouse = BooleanOption(
# _("Use separate display format for spouses"),
# True)
#diffspouse.set_help(_("Whether spouses can have a different format."))
#menu.add_option(category_name, "diffspouse", diffspouse)
        indspouse = BooleanOption(_('Indent Spouses'), True)
        indspouse.set_help(_("Whether to indent the spouses in the tree."))
        menu.add_option(category_name, "ind_spouse", indspouse)
sdisp = TextOption(_("Spousal\nDisplay Format"),
["$n",
"%s $b" %_BORN,
"{%s $d}" %_DIED])
sdisp.set_help(_("Display format for a spouse."))
menu.add_option(category_name, "spouse_disp", sdisp)
incmarr = BooleanOption(_('Include Marriage box'), True)
incmarr.set_help(
_("Whether to include a separate marital box in the report"))
menu.add_option(category_name, "inc_marr", incmarr)
marrdisp = StringOption(_("Marriage\nDisplay Format"), "%s $m" % _MARR)
marrdisp.set_help(_("Display format for the marital box."))
menu.add_option(category_name, "marr_disp", marrdisp)
##################
category_name = _("Replace")
repldisp = TextOption(
_("Replace Display Format:\n'Replace this'/' with this'"),
[])
repldisp.set_help(_("i.e.\nUnited States of America/U.S.A"))
menu.add_option(category_name, "replace_list", repldisp)
##################
category_name = _("Size")
self.scale = EnumeratedListOption(_("Scale tree to fit"), 0)
self.scale.add_item( 0, _("Do not scale tree"))
self.scale.add_item( 1, _("Scale tree to fit page width only"))
self.scale.add_item( 2, _("Scale tree to fit the size of the page"))
self.scale.set_help(
_("Whether to scale the tree to fit a specific paper size")
)
menu.add_option(category_name, "scale_tree", self.scale)
self.scale.connect('value-changed', self.__check_blank)
if "BKI" not in self.name.split(","):
self.__onepage = BooleanOption(_("Resize Page to Fit Tree size\n"
"\n"
"Note: Overrides options in the 'Paper Option' tab"
),
False)
self.__onepage.set_help(
_("Whether to resize the page to fit the size \n"
"of the tree. Note: the page will have a \n"
"non standard size.\n"
"\n"
"With this option selected, the following will happen:\n"
"\n"
"With the 'Do not scale tree' option the page\n"
" is resized to the height/width of the tree\n"
"\n"
"With 'Scale tree to fit page width only' the height of\n"
" the page is resized to the height of the tree\n"
"\n"
"With 'Scale tree to fit the size of the page' the page\n"
" is resized to remove any gap in either height or width"
))
menu.add_option(category_name, "resize_page", self.__onepage)
self.__onepage.connect('value-changed', self.__check_blank)
else:
self.__onepage = None
self.box_Y_sf = NumberOption(_("inter-box Y scale factor"),
1.00, 0.10, 2.00, 0.01)
self.box_Y_sf.set_help(_("Make the inter-box Y bigger or smaller"))
menu.add_option(category_name, "box_Yscale", self.box_Y_sf)
self.box_shadow_sf = NumberOption(_("box shadow scale factor"),
1.00, 0.00, 2.00, 0.01) # down to 0
self.box_shadow_sf.set_help(_("Make the box shadow bigger or smaller"))
menu.add_option(category_name, "shadowscale", self.box_shadow_sf)
##################
category_name = _("Include")
self.title = EnumeratedListOption(_("Report Title"), 0)
self.title.add_item( 0, _("Do not include a title"))
self.title.add_item( 1, _("Descendant Chart for [selected person(s)]"))
self.title.set_help(_("Choose a title for the report"))
menu.add_option(category_name, "report_title", self.title)
self.showparents.connect('value-changed', self.__Title_enum)
border = BooleanOption(_('Include a border'), False)
border.set_help(_("Whether to make a border around the report."))
menu.add_option(category_name, "inc_border", border)
prnnum = BooleanOption(_('Include Page Numbers'), False)
prnnum.set_help(_("Whether to include page numbers on each page."))
menu.add_option(category_name, "inc_pagenum", prnnum)
self.__blank = BooleanOption(_('Include Blank Pages'), True)
self.__blank.set_help(_("Whether to include pages that are blank."))
menu.add_option(category_name, "inc_blank", self.__blank)
#category_name = _("Notes")
self.usenote = BooleanOption(_('Include a note'), False)
self.usenote.set_help(
_("Whether to include a note on the report.")
)
menu.add_option(category_name, "inc_note", self.usenote)
self.notedisp = TextOption(_("Note"),[])
self.notedisp.set_help(_("Add a note"
"\n\n$T inserts today's date"))
menu.add_option(category_name, "note_disp", self.notedisp)
        locales = NoteType(0)
        notelocal = EnumeratedListOption(_("Note Location"), 2)
        for num, text in locales.note_locals():
            notelocal.add_item(num, text)
notelocal.set_help(_("Where to place the note."))
menu.add_option(category_name, "note_place", notelocal)
def __check_blank(self):
"""dis/enables the 'print blank pages' checkbox"""
if self.__onepage:
value = not self.__onepage.get_value()
else:
value = True
off = value and (self.scale.get_value() != 2)
self.__blank.set_available( off )
def __Title_enum(self):
item_list = [
[0, _("Do not include a title") ],
[1, _("Descendant Chart for [selected person(s)]") ],
]
if self.name.split(",")[0] != _RPT_NAME:
item_list.append(
[2, _("Family Chart for [names of chosen family]") ]
)
if self.showparents.get_value():
item_list.append(
[3, _("Cousin Chart for [names of children]") ]
)
self.title.set_items(item_list)
def make_default_style(self, default_style):
"""Make the default output style for the Descendant Tree."""
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER)
## Paragraph Styles:
font = FontStyle()
font.set_size(16)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_alignment(PARA_ALIGN_CENTER)
para_style.set_description(
_("The basic style used for the title display.")
)
default_style.add_paragraph_style("CG2-Title", para_style)
font = FontStyle()
font.set_size(9)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The basic style used for the text display.')
)
default_style.add_paragraph_style("CG2-Normal", para_style)
        #Set the size of the shadow based on the font size! Much better.
        #It will be scaled again later (in scale_styles) as needed.
box_shadow = PT2CM(font.get_size()) * .6
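        #e.g. for the 9 pt font above, PT2CM(9) is about 0.32 cm, so the
        #default shadow is roughly 0.32 * .6 = 0.19 cm.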
font.set_bold(True)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The bold style used for the text display.')
)
default_style.add_paragraph_style("CG2-Bold", para_style)
font = FontStyle()
font.set_size(9)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The basic style used for the note display.')
)
default_style.add_paragraph_style("CG2-Note", para_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Title")
graph_style.set_color((0, 0, 0))
graph_style.set_fill_color((255, 255, 255))
graph_style.set_line_width(0)
default_style.add_draw_style("CG2-Title", graph_style)
## Draw styles
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Normal")
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2-fam-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Normal")
graph_style.set_shadow(1, box_shadow)
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Bold")
graph_style.set_shadow(1, box_shadow)
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2b-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Note")
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2-note-box", graph_style)
graph_style = GraphicsStyle()
default_style.add_draw_style("CG2-line", graph_style)
#=====================================
#So do not fear, for I am with you; do not be dismayed,
#for I am your God. I will strengthen you and help you;
#I will uphold you with my righteous right hand.
#Isaiah 41:10
| arunkgupta/gramps | gramps/plugins/drawreport/descendtree.py | Python | gpl-2.0 | 66,162 | ["Brian"] | 45a91e231561258647ccfc0ba7ca4e441de30662548c054e1898b186f8417267 |
"""
End-to-end tests for the LMS.
"""
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.course_wiki import (
CourseWikiChildrenPage,
CourseWikiEditPage,
CourseWikiHistoryPage,
CourseWikiPage
)
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.tests.helpers import (
UniqueCourseTest,
)
from openedx.core.lib.tests import attr
@attr('a11y')
class CourseWikiA11yTest(UniqueCourseTest):
"""
Tests that verify the course wiki.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super().setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
self.tab_nav = TabNavPage(self.browser)
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
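        # install() creates the course on the test server, so the course
        # must exist before the auto-auth enrollment below can register
        # the test user for it.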
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
# Access course wiki page
self.course_home_page.visit()
self.tab_nav.go_to_tab('Wiki')
def _open_editor(self):
self.course_wiki_page.open_editor()
self.course_wiki_edit_page.wait_for_page()
def test_view(self):
"""
Verify the basic accessibility of the wiki page as initially displayed.
"""
self.course_wiki_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.course_wiki_page.a11y_audit.check_for_accessibility_errors()
def test_edit(self):
"""
        Verify the basic accessibility of the edit wiki page.
"""
self._open_editor()
self.course_wiki_edit_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.course_wiki_edit_page.a11y_audit.check_for_accessibility_errors()
def test_changes(self):
"""
        Verify the basic accessibility of the changes (history) wiki page.
"""
self.course_wiki_page.show_history()
history_page = CourseWikiHistoryPage(self.browser, self.course_id, self.course_info)
history_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
history_page.wait_for_page()
history_page.a11y_audit.check_for_accessibility_errors()
def test_children(self):
"""
        Verify the basic accessibility of the children wiki page.
"""
self.course_wiki_page.show_children()
children_page = CourseWikiChildrenPage(self.browser, self.course_id, self.course_info)
children_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
children_page.wait_for_page()
children_page.a11y_audit.check_for_accessibility_errors()
| eduNEXT/edunext-platform | common/test/acceptance/tests/lms/test_lms.py | Python | agpl-3.0 | 3,832 | ["VisIt"] | eee81a947717177243ee5da45af33593fee53e8d9f60d88b7174de6201d018e5 |
from sqlalchemy import *
from sqlalchemy.sql import table, column, ClauseElement, operators
from sqlalchemy.sql.expression import _clone, _from_objects
from sqlalchemy.testing import fixtures, AssertsExecutionResults, \
AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.sql.visitors import ClauseVisitor, CloningVisitor, \
cloned_traverse, ReplacingCloningVisitor
from sqlalchemy import exc
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
class TraversalTest(fixtures.TestBase, AssertsExecutionResults):
"""test ClauseVisitor's traversal, particularly its
ability to copy and modify a ClauseElement in place."""
@classmethod
def setup_class(cls):
global A, B
        # establish two fictitious ClauseElements.
# define deep equality semantics as well as deep
# identity semantics.
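        # (is_other() below tests deep *identity*, __eq__ deep *value*:
        # a CloningVisitor copy compares equal to the original while
        # is_other() distinguishes them.)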
class A(ClauseElement):
__visit_name__ = 'a'
def __init__(self, expr):
self.expr = expr
def is_other(self, other):
return other is self
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
return other.expr == self.expr
def __ne__(self, other):
return other.expr != self.expr
def __str__(self):
return "A(%s)" % repr(self.expr)
class B(ClauseElement):
__visit_name__ = 'b'
def __init__(self, *items):
self.items = items
def is_other(self, other):
if other is not self:
return False
for i1, i2 in zip(self.items, other.items):
if i1 is not i2:
return False
return True
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return False
return True
def __ne__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return True
return False
def _copy_internals(self, clone=_clone):
self.items = [clone(i) for i in self.items]
def get_children(self, **kwargs):
return self.items
def __str__(self):
return "B(%s)" % repr([str(i) for i in self.items])
def test_test_classes(self):
a1 = A("expr1")
struct = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct2 = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct3 = B(a1, A("expr2"), B(A("expr1b"),
A("expr2bmodified")), A("expr3"))
assert a1.is_other(a1)
assert struct.is_other(struct)
assert struct == struct2
assert struct != struct3
assert not struct.is_other(struct2)
assert not struct.is_other(struct3)
def test_clone(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
class Vis(CloningVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert not struct.is_other(s2)
def test_no_clone(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
class Vis(ClauseVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert struct.is_other(s2)
def test_change_in_place(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
struct2 = B(A("expr1"), A("expr2modified"), B(A("expr1b"),
A("expr2b")), A("expr3"))
struct3 = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2bmodified")), A("expr3"))
class Vis(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2":
a.expr = "expr2modified"
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct != s2
assert not struct.is_other(s2)
assert struct2 == s2
class Vis2(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2b":
a.expr = "expr2bmodified"
def visit_b(self, b):
pass
vis2 = Vis2()
s3 = vis2.traverse(struct)
assert struct != s3
assert struct3 == s3
def test_visit_name(self):
# override fns in testlib/schema.py
from sqlalchemy import Column
class CustomObj(Column):
pass
assert CustomObj.__visit_name__ == Column.__visit_name__ == 'column'
foo, bar = CustomObj('foo', String), CustomObj('bar', String)
bin = foo == bar
set(ClauseVisitor().iterate(bin))
assert set(ClauseVisitor().iterate(bin)) == set([foo, bar, bin])
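# --- Editor's sketch (not part of the original suite) -----------------------
# The distinction TraversalTest exercises, in miniature: ClauseVisitor
# traverses and mutates in place (the same object comes back), while
# CloningVisitor hands back an equal but distinct copy of the tree.
def _visitor_semantics_sketch():
    expr = column('x') == column('y')
    same = ClauseVisitor().traverse(expr)
    clone = CloningVisitor().traverse(expr)
    assert same is expr
    assert clone is not expr
    assert str(clone) == str(expr)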
class BinaryEndpointTraversalTest(fixtures.TestBase):
"""test the special binary product visit"""
def _assert_traversal(self, expr, expected):
canary = []
def visit(binary, l, r):
canary.append((binary.operator, l, r))
print binary.operator, l, r
sql_util.visit_binary_product(visit, expr)
eq_(
canary, expected
)
def test_basic(self):
a, b = column("a"), column("b")
self._assert_traversal(
a == b,
[
(operators.eq, a, b)
]
)
def test_with_tuples(self):
a, b, c, d, b1, b1a, b1b, e, f = (
column("a"),
column("b"),
column("c"),
column("d"),
column("b1"),
column("b1a"),
column("b1b"),
column("e"),
column("f")
)
expr = tuple_(
a, b, b1 == tuple_(b1a, b1b == d), c
) > tuple_(
func.go(e + f)
)
self._assert_traversal(
expr,
[
(operators.gt, a, e),
(operators.gt, a, f),
(operators.gt, b, e),
(operators.gt, b, f),
(operators.eq, b1, b1a),
(operators.eq, b1b, d),
(operators.gt, c, e),
(operators.gt, c, f)
]
)
def test_composed(self):
a, b, e, f, q, j, r = (
column("a"),
column("b"),
column("e"),
column("f"),
column("q"),
column("j"),
column("r"),
)
expr = and_(
(a + b) == q + func.sum(e + f),
and_(
j == r,
f == q
)
)
self._assert_traversal(
expr,
[
(operators.eq, a, q),
(operators.eq, a, e),
(operators.eq, a, f),
(operators.eq, b, q),
(operators.eq, b, e),
(operators.eq, b, f),
(operators.eq, j, r),
(operators.eq, f, q),
]
)
def test_subquery(self):
a, b, c = column("a"), column("b"), column("c")
subq = select([c]).where(c == a).as_scalar()
expr = and_(a == b, b == subq)
self._assert_traversal(
expr,
[
(operators.eq, a, b),
(operators.eq, b, subq),
]
)
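# --- Editor's sketch (not part of the original suite) -----------------------
# visit_binary_product pairs each binary operator with the column endpoints
# on either side, so a composed expression like (a + b) == c is reported as
# (eq, a, c) and (eq, b, c), matching the expectations asserted above.
def _binary_product_sketch():
    a, b, c = column("a"), column("b"), column("c")
    pairs = []
    sql_util.visit_binary_product(
        lambda binary, l, r: pairs.append((binary.operator, l, r)),
        (a + b) == c)
    assert pairs == [(operators.eq, a, c), (operators.eq, b, c)]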
class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
"""test copy-in-place behavior of various ClauseElements."""
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2, t3
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
t3 = Table('table3', MetaData(),
Column('col1', Integer),
Column('col2', Integer)
)
def test_binary(self):
clause = t1.c.col2 == t2.c.col2
eq_(str(clause), str(CloningVisitor().traverse(clause)))
def test_binary_anon_label_quirk(self):
t = table('t1', column('col1'))
f = t.c.col1 * 5
self.assert_compile(select([f]),
"SELECT t1.col1 * :col1_1 AS anon_1 FROM t1")
        f.anon_label  # access the anonymous label so it is memoized before adapting
a = t.alias()
f = sql_util.ClauseAdapter(a).traverse(f)
self.assert_compile(select([f]),
"SELECT t1_1.col1 * :col1_1 AS anon_1 FROM t1 AS t1_1")
def test_join(self):
clause = t1.join(t2, t1.c.col2 == t2.c.col2)
c1 = str(clause)
assert str(clause) == str(CloningVisitor().traverse(clause))
class Vis(CloningVisitor):
def visit_binary(self, binary):
binary.right = t2.c.col3
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == str(t1.join(t2, t1.c.col2 == t2.c.col3))
def test_aliased_column_adapt(self):
clause = t1.select()
aliased = t1.select().alias()
aliased2 = t1.alias()
adapter = sql_util.ColumnAdapter(aliased)
f = select([
adapter.columns[c]
for c in aliased2.c
]).select_from(aliased)
s = select([aliased2]).select_from(aliased)
eq_(str(s), str(f))
f = select([
adapter.columns[func.count(aliased2.c.col1)]
]).select_from(aliased)
eq_(
str(select([func.count(aliased2.c.col1)]).select_from(aliased)),
str(f)
)
def test_aliased_cloned_column_adapt_inner(self):
clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# fixed by [ticket:2419]. the inside columns
# on aliased3 have _is_clone_of pointers to those of
# aliased2. corresponding_column checks these
# now.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2._raw_columns
])
f2 = select([
adapter.columns[c]
for c in aliased3._raw_columns
])
eq_(
str(f1), str(f2)
)
def test_aliased_cloned_column_adapt_exported(self):
clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2.c
])
f2 = select([
adapter.columns[c]
for c in aliased3.c
])
eq_(
str(f1), str(f2)
)
def test_aliased_cloned_schema_column_adapt_exported(self):
clause = select([t3.c.col1, func.foo(t3.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2.c
])
f2 = select([
adapter.columns[c]
for c in aliased3.c
])
eq_(
str(f1), str(f2)
)
def test_text(self):
clause = text(
"select * from table where foo=:bar",
bindparams=[bindparam('bar')])
c1 = str(clause)
class Vis(CloningVisitor):
def visit_textclause(self, text):
text.text = text.text + " SOME MODIFIER=:lala"
text.bindparams['lala'] = bindparam('lala')
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == c1 + " SOME MODIFIER=:lala"
assert clause.bindparams.keys() == ['bar']
assert set(clause2.bindparams.keys()) == set(['bar', 'lala'])
def test_select(self):
s2 = select([t1])
s2_assert = str(s2)
s3_assert = str(select([t1], t1.c.col2 == 7))
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
s3 = Vis().traverse(s2)
assert str(s3) == s3_assert
assert str(s2) == s2_assert
print str(s2)
print str(s3)
class Vis(ClauseVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
Vis().traverse(s2)
assert str(s2) == s3_assert
s4_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col3 == 9)))
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col3 == 9)
s4 = Vis().traverse(s3)
print str(s3)
print str(s4)
assert str(s4) == s4_assert
assert str(s3) == s3_assert
s5_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col1 == 9)))
class Vis(CloningVisitor):
def visit_binary(self, binary):
if binary.left is t1.c.col3:
binary.left = t1.c.col1
binary.right = bindparam("col1", unique=True)
s5 = Vis().traverse(s4)
print str(s4)
print str(s5)
assert str(s5) == s5_assert
assert str(s4) == s4_assert
def test_union(self):
u = union(t1.select(), t2.select())
u2 = CloningVisitor().traverse(u)
assert str(u) == str(u2)
assert [str(c) for c in u2.c] == [str(c) for c in u.c]
u = union(t1.select(), t2.select())
cols = [str(c) for c in u.c]
u2 = CloningVisitor().traverse(u)
assert str(u) == str(u2)
assert [str(c) for c in u2.c] == cols
s1 = select([t1], t1.c.col1 == bindparam('id_param'))
s2 = select([t2])
u = union(s1, s2)
u2 = u.params(id_param=7)
u3 = u.params(id_param=10)
assert str(u) == str(u2) == str(u3)
assert u2.compile().params == {'id_param':7}
assert u3.compile().params == {'id_param':10}
def test_in(self):
expr = t1.c.col1.in_(['foo', 'bar'])
expr2 = CloningVisitor().traverse(expr)
assert str(expr) == str(expr2)
def test_over(self):
expr = func.row_number().over(order_by=t1.c.col1)
expr2 = CloningVisitor().traverse(expr)
assert str(expr) == str(expr2)
def test_adapt_union(self):
u = union(
t1.select().where(t1.c.col1 == 4),
t1.select().where(t1.c.col1 == 5)
).alias()
assert sql_util.ClauseAdapter(u).traverse(t1) is u
def test_binds(self):
"""test that unique bindparams change their name upon clone()
to prevent conflicts"""
s = select([t1], t1.c.col1 == bindparam(None, unique=True)).alias()
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s], s.c.col2 == s2.c.col2)
self.assert_compile(s3,
"SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 WHERE table1.col1 = :param_1) "
"AS anon_1, "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
"AS col3 FROM table1 WHERE table1.col1 = :param_2) AS anon_2 "
"WHERE anon_1.col2 = anon_2.col2")
s = select([t1], t1.c.col1 == 4).alias()
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s], s.c.col2 == s2.c.col2)
self.assert_compile(s3,
"SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 WHERE table1.col1 = :col1_1) "
"AS anon_1, "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
"AS col3 FROM table1 WHERE table1.col1 = :col1_2) AS anon_2 "
"WHERE anon_1.col2 = anon_2.col2")
def test_extract(self):
s = select([extract('foo', t1.c.col1).label('col1')])
self.assert_compile(s,
"SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s2.c.col1])
self.assert_compile(s,
"SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
self.assert_compile(s3,
"SELECT anon_1.col1 FROM (SELECT EXTRACT(foo FROM "
"table1.col1) AS col1 FROM table1) AS anon_1")
@testing.emits_warning('.*replaced by another column with the same key')
def test_alias(self):
subq = t2.select().alias('subq')
s = select([t1.c.col1, subq.c.col1],
from_obj=[t1, subq,
t1.join(subq, t1.c.col1 == subq.c.col2)]
)
orig = str(s)
s2 = CloningVisitor().traverse(s)
assert orig == str(s) == str(s2)
s4 = CloningVisitor().traverse(s2)
assert orig == str(s) == str(s2) == str(s4)
s3 = sql_util.ClauseAdapter(table('foo')).traverse(s)
assert orig == str(s) == str(s3)
s4 = sql_util.ClauseAdapter(table('foo')).traverse(s3)
assert orig == str(s) == str(s3) == str(s4)
subq = subq.alias('subq')
s = select([t1.c.col1, subq.c.col1],
from_obj=[t1, subq,
t1.join(subq, t1.c.col1 == subq.c.col2)]
)
s5 = CloningVisitor().traverse(s)
assert orig == str(s) == str(s5)
def test_correlated_select(self):
s = select(['*'], t1.c.col1 == t2.c.col1,
from_obj=[t1, t2]).correlate(t2)
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
self.assert_compile(
select([t2]).where(t2.c.col1 == Vis().traverse(s)),
"SELECT table2.col1, table2.col2, table2.col3 "
"FROM table2 WHERE table2.col1 = "
"(SELECT * FROM table1 WHERE table1.col1 = table2.col1 "
"AND table1.col2 = :col2_1)"
)
def test_this_thing(self):
s = select([t1]).where(t1.c.col1 == 'foo').alias()
s2 = select([s.c.col1])
self.assert_compile(s2,
'SELECT anon_1.col1 FROM (SELECT '
'table1.col1 AS col1, table1.col2 AS col2, '
'table1.col3 AS col3 FROM table1 WHERE '
'table1.col1 = :col1_1) AS anon_1')
t1a = t1.alias()
s2 = sql_util.ClauseAdapter(t1a).traverse(s2)
self.assert_compile(s2,
'SELECT anon_1.col1 FROM (SELECT '
'table1_1.col1 AS col1, table1_1.col2 AS '
'col2, table1_1.col3 AS col3 FROM table1 '
'AS table1_1 WHERE table1_1.col1 = '
':col1_1) AS anon_1')
def test_select_fromtwice_one(self):
t1a = t1.alias()
s = select([1], t1.c.col1 == t1a.c.col1, from_obj=t1a).correlate(t1a)
s = select([t1]).where(t1.c.col1 == s)
self.assert_compile(s,
"SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
"WHERE table1.col1 = "
"(SELECT 1 FROM table1, table1 AS table1_1 "
"WHERE table1.col1 = table1_1.col1)"
)
s = CloningVisitor().traverse(s)
self.assert_compile(s,
"SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
"WHERE table1.col1 = "
"(SELECT 1 FROM table1, table1 AS table1_1 "
"WHERE table1.col1 = table1_1.col1)")
def test_select_fromtwice_two(self):
s = select([t1]).where(t1.c.col1 == 'foo').alias()
s2 = select([1], t1.c.col1 == s.c.col1, from_obj=s).correlate(t1)
s3 = select([t1]).where(t1.c.col1 == s2)
self.assert_compile(s3,
"SELECT table1.col1, table1.col2, table1.col3 "
"FROM table1 WHERE table1.col1 = "
"(SELECT 1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 "
"WHERE table1.col1 = :col1_1) "
"AS anon_1 WHERE table1.col1 = anon_1.col1)"
)
s4 = ReplacingCloningVisitor().traverse(s3)
self.assert_compile(s4,
"SELECT table1.col1, table1.col2, table1.col3 "
"FROM table1 WHERE table1.col1 = "
"(SELECT 1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 "
"WHERE table1.col1 = :col1_1) "
"AS anon_1 WHERE table1.col1 = anon_1.col1)"
)
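# --- Editor's sketch (not part of the original suite) -----------------------
# ClauseAdapter, tested extensively below, rewrites references to a
# selectable into references to a derived selectable such as an alias;
# the simplest possible case (t1 is the module-level table fixture):
def _clause_adapter_sketch():
    ta = t1.alias('ta')
    adapted = sql_util.ClauseAdapter(ta).traverse(t1.c.col1 == 5)
    assert str(adapted) == str(ta.c.col1 == 5)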
class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_correlation_on_clone(self):
t1alias = t1.alias('t1alias')
t2alias = t2.alias('t2alias')
vis = sql_util.ClauseAdapter(t1alias)
s = select(['*'], from_obj=[t1alias, t2alias]).as_scalar()
assert t2alias in s._froms
assert t1alias in s._froms
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
assert t2alias not in s._froms # not present because it's been
# cloned
assert t1alias in s._froms # present because the adapter placed
# it there
# correlate list on "s" needs to take into account the full
# _cloned_set for each element in _froms when correlating
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select(['*'], from_obj=[t1alias,
t2alias]).correlate(t2alias).as_scalar()
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = CloningVisitor().traverse(s)
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select(['*']).where(t1.c.col1 == t2.c.col1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = select(['*']).where(t1.c.col1
== t2.c.col1).correlate(t1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
@testing.fails_on_everything_except()
def test_joins_dont_adapt(self):
# adapting to a join, i.e. ClauseAdapter(t1.join(t2)), doesn't
# make much sense. ClauseAdapter doesn't make any changes if
# it's against a straight join.
users = table('users', column('id'))
addresses = table('addresses', column('id'), column('user_id'))
ualias = users.alias()
s = select([func.count(addresses.c.id)], users.c.id
== addresses.c.user_id).correlate(users)
s = sql_util.ClauseAdapter(ualias).traverse(s)
j1 = addresses.join(ualias, addresses.c.user_id == ualias.c.id)
self.assert_compile(sql_util.ClauseAdapter(j1).traverse(s),
'SELECT count(addresses.id) AS count_1 '
'FROM addresses WHERE users_1.id = '
'addresses.user_id')
def test_table_to_alias_1(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
assert list(_from_objects(ff)) == [t1alias]
def test_table_to_alias_2(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], from_obj=[t1])),
'SELECT * FROM table1 AS t1alias')
def test_table_to_alias_3(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(select(['*'], t1.c.col1 == t2.c.col2),
'SELECT * FROM table1, table2 WHERE '
'table1.col1 = table2.col2')
def test_table_to_alias_4(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
def test_table_to_alias_5(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2, from_obj=[t1, t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
def test_table_to_alias_6(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select([t1alias, t2]).where(t1alias.c.col1 ==
vis.traverse(select(['*'],
t1.c.col1 == t2.c.col2,
from_obj=[t1, t2]).correlate(t1))),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1, table2.col2, table2.col3 "
"FROM table1 AS t1alias, table2 WHERE t1alias.col1 = "
"(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)"
)
def test_table_to_alias_7(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select([t1alias, t2]).where(t1alias.c.col1 ==
vis.traverse(select(['*'],
t1.c.col1 == t2.c.col2,
from_obj=[t1, t2]).correlate(t2))),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1, table2.col2, table2.col3 "
"FROM table1 AS t1alias, table2 "
"WHERE t1alias.col1 = "
"(SELECT * FROM table1 AS t1alias "
"WHERE t1alias.col1 = table2.col2)")
def test_table_to_alias_8(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(case([(t1.c.col1 == 5,
t1.c.col2)], else_=t1.c.col1)),
'CASE WHEN (t1alias.col1 = :col1_1) THEN '
't1alias.col2 ELSE t1alias.col1 END')
def test_table_to_alias_9(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(case([(5, t1.c.col2)],
value=t1.c.col1, else_=t1.c.col1)),
'CASE t1alias.col1 WHEN :param_1 THEN '
't1alias.col2 ELSE t1alias.col1 END')
def test_table_to_alias_10(self):
s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
def test_table_to_alias_11(self):
s = select(['*'], from_obj=[t1]).alias('foo')
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(s.select()),
'SELECT foo.* FROM (SELECT * FROM table1 '
'AS t1alias) AS foo')
def test_table_to_alias_12(self):
s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
def test_table_to_alias_13(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
self.assert_compile(select([ff]),
'SELECT count(t1alias.col1) AS foo FROM '
'table1 AS t1alias')
assert list(_from_objects(ff)) == [t1alias]
    #def test_table_to_alias_2(self):
    #    TODO: self.assert_compile(
    #        vis.traverse(select([func.count(t1.c.col1).label('foo')]),
    #                     clone=True),
    #        "SELECT count(t1alias.col1) AS foo FROM table1 AS t1alias")
def test_table_to_alias_14(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
def test_table_to_alias_15(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2, from_obj=[t1, t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
def test_table_to_alias_16(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
select([t1alias, t2alias]).where(
t1alias.c.col1 ==
vis.traverse(select(['*'],
t1.c.col1 == t2.c.col2,
from_obj=[t1, t2]).correlate(t1))
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"t2alias.col1, t2alias.col2, t2alias.col3 "
"FROM table1 AS t1alias, table2 AS t2alias "
"WHERE t1alias.col1 = "
"(SELECT * FROM table2 AS t2alias "
"WHERE t1alias.col1 = t2alias.col2)"
)
def test_table_to_alias_17(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
t2alias.select().where(t2alias.c.col2 ==
vis.traverse(select(['*'],
t1.c.col1 == t2.c.col2,
from_obj=[t1, t2]).correlate(t2))),
'SELECT t2alias.col1, t2alias.col2, t2alias.col3 '
'FROM table2 AS t2alias WHERE t2alias.col2 = '
'(SELECT * FROM table1 AS t1alias WHERE '
't1alias.col1 = t2alias.col2)')
def test_include_exclude(self):
m = MetaData()
a = Table('a', m,
Column('id', Integer, primary_key=True),
Column('xxx_id', Integer,
ForeignKey('a.id', name='adf', use_alter=True)
)
)
e = (a.c.id == a.c.xxx_id)
assert str(e) == "a.id = a.xxx_id"
b = a.alias()
        e = sql_util.ClauseAdapter(
            b,
            include=set([a.c.id]),
            equivalents={a.c.id: set([a.c.id])}
        ).traverse(e)
assert str(e) == "a_1.id = a.xxx_id"
def test_recursive_equivalents(self):
m = MetaData()
a = Table('a', m, Column('x', Integer), Column('y', Integer))
b = Table('b', m, Column('x', Integer), Column('y', Integer))
c = Table('c', m, Column('x', Integer), Column('y', Integer))
# force a recursion overflow, by linking a.c.x<->c.c.x, and
# asking for a nonexistent col. corresponding_column should prevent
# endless depth.
adapt = sql_util.ClauseAdapter(b,
equivalents={a.c.x: set([c.c.x]), c.c.x: set([a.c.x])})
assert adapt._corresponding_column(a.c.x, False) is None
def test_multilevel_equivalents(self):
m = MetaData()
a = Table('a', m, Column('x', Integer), Column('y', Integer))
b = Table('b', m, Column('x', Integer), Column('y', Integer))
c = Table('c', m, Column('x', Integer), Column('y', Integer))
alias = select([a]).select_from(a.join(b, a.c.x == b.c.x)).alias()
# two levels of indirection from c.x->b.x->a.x, requires recursive
# corresponding_column call
adapt = sql_util.ClauseAdapter(alias,
equivalents={b.c.x: set([a.c.x]), c.c.x: set([b.c.x])})
assert adapt._corresponding_column(a.c.x, False) is alias.c.x
assert adapt._corresponding_column(c.c.x, False) is alias.c.x
def test_join_to_alias(self):
metadata = MetaData()
a = Table('a', metadata,
Column('id', Integer, primary_key=True))
b = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
c = Table('c', metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer, ForeignKey('b.id')),
)
d = Table('d', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
j1 = a.outerjoin(b)
j2 = select([j1], use_labels=True)
j3 = c.join(j2, j2.c.b_id == c.c.bid)
j4 = j3.outerjoin(d)
self.assert_compile(j4,
'c JOIN (SELECT a.id AS a_id, b.id AS '
'b_id, b.aid AS b_aid FROM a LEFT OUTER '
'JOIN b ON a.id = b.aid) ON b_id = c.bid '
'LEFT OUTER JOIN d ON a_id = d.aid')
j5 = j3.alias('foo')
j6 = sql_util.ClauseAdapter(j5).copy_and_process([j4])[0]
# this statement takes c join(a join b), wraps it inside an
# aliased "select * from c join(a join b) AS foo". the outermost
# right side "left outer join d" stays the same, except "d"
# joins against foo.a_id instead of plain "a_id"
self.assert_compile(j6,
'(SELECT c.id AS c_id, c.bid AS c_bid, '
'a_id AS a_id, b_id AS b_id, b_aid AS '
'b_aid FROM c JOIN (SELECT a.id AS a_id, '
'b.id AS b_id, b.aid AS b_aid FROM a LEFT '
'OUTER JOIN b ON a.id = b.aid) ON b_id = '
'c.bid) AS foo LEFT OUTER JOIN d ON '
'foo.a_id = d.aid')
def test_derived_from(self):
assert select([t1]).is_derived_from(t1)
assert not select([t2]).is_derived_from(t1)
assert not t1.is_derived_from(select([t1]))
assert t1.alias().is_derived_from(t1)
s1 = select([t1, t2]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
assert s2.is_derived_from(s1)
s2 = s2._clone()
assert s2.is_derived_from(s1)
def test_aliasedselect_to_aliasedselect_straight(self):
# original issue from ticket #904
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(s1),
'SELECT foo.col1, foo.col2, foo.col3 FROM '
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1) '
'AS foo LIMIT :param_1 OFFSET :param_2',
{'param_1': 5, 'param_2': 10})
def test_aliasedselect_to_aliasedselect_join(self):
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
j = s1.outerjoin(t2, s1.c.col1 == t2.c.col1)
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
'SELECT anon_1.col1, anon_1.col2, '
'anon_1.col3, table2.col1, table2.col2, '
'table2.col3 FROM (SELECT foo.col1 AS '
'col1, foo.col2 AS col2, foo.col3 AS col3 '
'FROM (SELECT table1.col1 AS col1, '
'table1.col2 AS col2, table1.col3 AS col3 '
'FROM table1) AS foo LIMIT :param_1 OFFSET '
':param_2) AS anon_1 LEFT OUTER JOIN '
'table2 ON anon_1.col1 = table2.col1',
{'param_1': 5, 'param_2': 10})
def test_aliasedselect_to_aliasedselect_join_nested_table(self):
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
talias = t1.alias('bar')
assert not s2.is_derived_from(talias)
j = s1.outerjoin(talias, s1.c.col1 == talias.c.col1)
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
'SELECT anon_1.col1, anon_1.col2, '
'anon_1.col3, bar.col1, bar.col2, bar.col3 '
'FROM (SELECT foo.col1 AS col1, foo.col2 '
'AS col2, foo.col3 AS col3 FROM (SELECT '
'table1.col1 AS col1, table1.col2 AS col2, '
'table1.col3 AS col3 FROM table1) AS foo '
'LIMIT :param_1 OFFSET :param_2) AS anon_1 '
'LEFT OUTER JOIN table1 AS bar ON '
'anon_1.col1 = bar.col1', {'param_1': 5,
'param_2': 10})
def test_functions(self):
self.assert_compile(
sql_util.ClauseAdapter(t1.alias()).\
traverse(func.count(t1.c.col1)),
'count(table1_1.col1)')
s = select([func.count(t1.c.col1)])
self.assert_compile(sql_util.ClauseAdapter(t1.alias()).traverse(s),
'SELECT count(table1_1.col1) AS count_1 '
'FROM table1 AS table1_1')
def test_recursive(self):
metadata = MetaData()
a = Table('a', metadata,
Column('id', Integer, primary_key=True))
b = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
c = Table('c', metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer, ForeignKey('b.id')),
)
d = Table('d', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
u = union(
a.join(b).select().apply_labels(),
a.join(d).select().apply_labels()
).alias()
self.assert_compile(
sql_util.ClauseAdapter(u).\
traverse(select([c.c.bid]).where(c.c.bid == u.c.b_aid)),
"SELECT c.bid "\
"FROM c, (SELECT a.id AS a_id, b.id AS b_id, b.aid AS b_aid "
"FROM a JOIN b ON a.id = b.aid UNION SELECT a.id AS a_id, d.id "
"AS d_id, d.aid AS d_aid "
"FROM a JOIN d ON a.id = d.aid) AS anon_1 "
"WHERE c.bid = anon_1.b_aid"
)
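# --- Editor's sketch (not part of the original suite) -----------------------
# The `equivalents` mapping used in the tests above tells the adapter which
# columns may stand in for one another when no direct correspondence exists;
# lookups recurse through the map, so indirect chains still resolve.
def _equivalents_sketch():
    m = MetaData()
    a = Table('a', m, Column('x', Integer))
    b = Table('b', m, Column('x', Integer))
    alias = select([a]).alias()
    adapt = sql_util.ClauseAdapter(alias, equivalents={b.c.x: set([a.c.x])})
    assert adapt._corresponding_column(b.c.x, False) is alias.c.x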
class SpliceJoinsTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global table1, table2, table3, table4
def _table(name):
return table(name, column('col1'), column('col2'),
column('col3'))
table1, table2, table3, table4 = [_table(name) for name in
('table1', 'table2', 'table3', 'table4')]
def test_splice(self):
t1, t2, t3, t4 = table1, table2, table1.alias(), table2.alias()
j = t1.join(t2, t1.c.col1 == t2.c.col1).join(t3, t2.c.col1
== t3.c.col1).join(t4, t4.c.col1 == t1.c.col1)
s = select([t1]).where(t1.c.col2 < 5).alias()
self.assert_compile(sql_util.splice_joins(s, j),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'WHERE table1.col2 < :col2_1) AS anon_1 '
'JOIN table2 ON anon_1.col1 = table2.col1 '
'JOIN table1 AS table1_1 ON table2.col1 = '
'table1_1.col1 JOIN table2 AS table2_1 ON '
'table2_1.col1 = anon_1.col1')
def test_stop_on(self):
t1, t2, t3 = table1, table2, table3
j1 = t1.join(t2, t1.c.col1 == t2.c.col1)
j2 = j1.join(t3, t2.c.col1 == t3.c.col1)
s = select([t1]).select_from(j1).alias()
self.assert_compile(sql_util.splice_joins(s, j2),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'JOIN table2 ON table1.col1 = table2.col1) '
'AS anon_1 JOIN table2 ON anon_1.col1 = '
'table2.col1 JOIN table3 ON table2.col1 = '
'table3.col1')
self.assert_compile(sql_util.splice_joins(s, j2, j1),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'JOIN table2 ON table1.col1 = table2.col1) '
'AS anon_1 JOIN table3 ON table2.col1 = '
'table3.col1')
def test_splice_2(self):
t2a = table2.alias()
t3a = table3.alias()
j1 = table1.join(t2a, table1.c.col1 == t2a.c.col1).join(t3a,
t2a.c.col2 == t3a.c.col2)
t2b = table4.alias()
j2 = table1.join(t2b, table1.c.col3 == t2b.c.col3)
self.assert_compile(sql_util.splice_joins(table1, j1),
'table1 JOIN table2 AS table2_1 ON '
'table1.col1 = table2_1.col1 JOIN table3 '
'AS table3_1 ON table2_1.col2 = '
'table3_1.col2')
self.assert_compile(sql_util.splice_joins(table1, j2),
'table1 JOIN table4 AS table4_1 ON '
'table1.col3 = table4_1.col3')
self.assert_compile(sql_util.splice_joins(sql_util.splice_joins(table1,
j1), j2),
'table1 JOIN table2 AS table2_1 ON '
'table1.col1 = table2_1.col1 JOIN table3 '
'AS table3_1 ON table2_1.col2 = '
'table3_1.col2 JOIN table4 AS table4_1 ON '
'table1.col3 = table4_1.col3')
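# --- Editor's sketch (not part of the original suite) -----------------------
# SelectTest below verifies the "generative" contract: methods such as
# column(), select_from() and prefix_with() return a modified copy and leave
# the original untouched. In miniature (t1 is the table fixture defined in
# setup_class):
def _generative_contract_sketch():
    s = t1.select()
    s_copy = s.prefix_with('HINT')
    assert s_copy is not s
    assert 'HINT' not in str(s)
    assert 'HINT' in str(s_copy)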
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
"""tests the generative capability of Select"""
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_columns(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.column('yyy')
self.assert_compile(select_copy,
'SELECT table1.col1, table1.col2, '
'table1.col3, yyy FROM table1')
assert s.columns is not select_copy.columns
assert s._columns is not select_copy._columns
assert s._raw_columns is not select_copy._raw_columns
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_froms(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.select_from(t2)
self.assert_compile(select_copy,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1, table2')
assert s._froms is not select_copy._froms
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_prefixes(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.prefix_with('FOOBER')
self.assert_compile(select_copy,
'SELECT FOOBER table1.col1, table1.col2, '
'table1.col3 FROM table1')
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_execution_options(self):
s = select().execution_options(foo='bar')
s2 = s.execution_options(bar='baz')
s3 = s.execution_options(foo='not bar')
# The original select should not be modified.
assert s._execution_options == dict(foo='bar')
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo='bar', bar='baz')
assert s3._execution_options == dict(foo='not bar')
def test_invalid_options(self):
assert_raises(
exc.ArgumentError,
select().execution_options, compiled_cache={}
)
assert_raises(
exc.ArgumentError,
select().execution_options,
isolation_level='READ_COMMITTED'
)
# this feature not available yet
def _NOTYET_test_execution_options_in_kwargs(self):
s = select(execution_options=dict(foo='bar'))
s2 = s.execution_options(bar='baz')
# The original select should not be modified.
assert s._execution_options == dict(foo='bar')
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo='bar', bar='baz')
# this feature not available yet
def _NOTYET_test_execution_options_in_text(self):
s = text('select 42', execution_options=dict(foo='bar'))
assert s._execution_options == dict(foo='bar')
class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests the generative capability of Insert, Update"""
__dialect__ = 'default'
    # fixme: consolidate coverage from elsewhere here and expand
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_prefixes(self):
i = t1.insert()
self.assert_compile(i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
gen = i.prefix_with("foober")
self.assert_compile(gen,
"INSERT foober INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
self.assert_compile(i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
i2 = t1.insert(prefixes=['squiznart'])
self.assert_compile(i2,
"INSERT squiznart INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
gen2 = i2.prefix_with("quux")
self.assert_compile(gen2,
"INSERT squiznart quux INTO "
"table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
def test_add_kwarg(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values(col1=5)
eq_(i.parameters, {"col1": 5})
i = i.values(col2=7)
eq_(i.parameters, {"col1": 5, "col2": 7})
def test_via_tuple_single(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values((5, 6, 7))
eq_(i.parameters, {"col1": 5, "col2": 6, "col3": 7})
    def test_kw_and_dict_simultaneously_single(self):
i = t1.insert()
i = i.values({"col1": 5}, col2=7)
eq_(i.parameters, {"col1": 5, "col2": 7})
def test_via_tuple_multi(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values([(5, 6, 7), (8, 9, 10)])
eq_(i.parameters, [
{"col1": 5, "col2": 6, "col3": 7},
{"col1": 8, "col2": 9, "col3": 10},
]
)
def test_inline_values_single(self):
i = t1.insert(values={"col1": 5})
eq_(i.parameters, {"col1": 5})
is_(i._has_multi_parameters, False)
def test_inline_values_multi(self):
i = t1.insert(values=[{"col1": 5}, {"col1": 6}])
eq_(i.parameters, [{"col1": 5}, {"col1": 6}])
is_(i._has_multi_parameters, True)
def test_add_dictionary(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values({"col1": 5})
eq_(i.parameters, {"col1": 5})
is_(i._has_multi_parameters, False)
i = i.values({"col1": 6})
# note replaces
eq_(i.parameters, {"col1": 6})
is_(i._has_multi_parameters, False)
i = i.values({"col2": 7})
eq_(i.parameters, {"col1": 6, "col2": 7})
is_(i._has_multi_parameters, False)
def test_add_kwarg_disallowed_multi(self):
i = t1.insert()
i = i.values([{"col1": 5}, {"col1": 7}])
assert_raises_message(
exc.InvalidRequestError,
"This construct already has multiple parameter sets.",
i.values, col2=7
)
def test_cant_mix_single_multi_formats_dict_to_list(self):
i = t1.insert().values(col1=5)
assert_raises_message(
exc.ArgumentError,
"Can't mix single-values and multiple values "
"formats in one statement",
i.values, [{"col1": 6}]
)
def test_cant_mix_single_multi_formats_list_to_dict(self):
i = t1.insert().values([{"col1": 6}])
assert_raises_message(
exc.ArgumentError,
"Can't mix single-values and multiple values "
"formats in one statement",
i.values, {"col1": 5}
)
def test_erroneous_multi_args_dicts(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values, {"col1": 5}, {"col1": 7}
)
def test_erroneous_multi_args_tuples(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values, (5, 6, 7), (8, 9, 10)
)
def test_erroneous_multi_args_plus_kw(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Can't pass kwargs and multiple parameter sets simultaenously",
i.values, [{"col1": 5}], col2=7
)
def test_update_no_support_multi_values(self):
u = t1.update()
assert_raises_message(
exc.InvalidRequestError,
"This construct does not support multiple parameter sets.",
u.values, [{"col1": 5}, {"col1": 7}]
)
def test_update_no_support_multi_constructor(self):
assert_raises_message(
exc.InvalidRequestError,
"This construct does not support multiple parameter sets.",
t1.update, values=[{"col1": 5}, {"col1": 7}]
)
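    # --- Editor's sketch (not part of the original suite) -------------------
    # Summary of the contract exercised above: successive .values() calls on
    # an Insert merge dictionaries (later keys win), while a list argument
    # switches the construct to multi-parameter mode, after which single-dict
    # and kwarg forms are rejected.
    def test_values_contract_sketch(self):
        i = t1.insert().values(col1=1).values(col2=2)
        eq_(i.parameters, {"col1": 1, "col2": 2})
        multi = t1.insert().values([{"col1": 1}, {"col1": 2}])
        is_(multi._has_multi_parameters, True)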
|
rclmenezes/sqlalchemy
|
test/sql/test_generative.py
|
Python
|
mit
| 57,284
|
[
"ADF",
"VisIt"
] |
d886e7b9ffe4c1def322e4928484998fdafc7db515e61359ee57a490592545f3
|
'''
FromScratchGauss.py
Initialize params of a mixture model with Gaussian observations from scratch.
'''
import numpy as np
from bnpy.util import discrete_single_draw
from bnpy.data import XData
def init_global_params(hmodel, Data, initname='randexamples', seed=0, K=0, **kwargs):
PRNG = np.random.RandomState(seed)
X = Data.X
if initname == 'randexamples':
        ''' Choose K items uniformly at random from the Data,
            then create component params by M-step given those single items
        '''
resp = np.zeros((Data.nObs, K))
permIDs = PRNG.permutation(Data.nObs).tolist()
for k in xrange(K):
resp[permIDs[k],k] = 1.0
elif initname == 'randexamplesbydist':
        ''' Choose K items from the Data,
            selecting the first at random, then each subsequent one with
            probability proportional to the squared Euclidean distance to
            the closest item already chosen
        '''
objID = discrete_single_draw(np.ones(Data.nObs), PRNG)
chosenObjIDs = list([objID])
minDistVec = np.inf * np.ones(Data.nObs)
for k in range(1, K):
curDistVec = np.sum((Data.X - Data.X[objID])**2, axis=1)
minDistVec = np.minimum(minDistVec, curDistVec)
objID = discrete_single_draw(minDistVec, PRNG)
chosenObjIDs.append(objID)
resp = np.zeros((Data.nObs, K))
for k in xrange(K):
resp[chosenObjIDs[k], k] = 1.0
elif initname == 'randsoftpartition':
''' Randomly assign all data items some mass in each of K components
then create component params by M-step given that soft partition
'''
resp = PRNG.rand(Data.nObs, K)
resp = resp/np.sum(resp,axis=1)[:,np.newaxis]
elif initname == 'randomnaive':
''' Generate K "fake" examples from the diagonalized data covariance,
creating params by assigning each "fake" example to a component.
'''
Sig = np.sqrt(np.diag(np.cov(Data.X.T)))
Xfake = Sig * PRNG.randn(K, Data.dim)
Data = XData(Xfake)
resp = np.eye(K)
    else:
        raise ValueError("Unrecognized initname: %s" % initname)
    LP = dict(resp=resp)
SS = hmodel.get_global_suff_stats(Data, LP)
hmodel.update_global_params(SS)
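# --- Editor's sketch (not part of the original file) ------------------------
# The 'randexamplesbydist' branch above is a k-means++-style seeding scheme:
# each new seed is drawn with probability proportional to its squared
# distance from the closest seed chosen so far. Restated with plain numpy:
def _kmeanspp_style_seeding_sketch(X, K, seed=0):
    PRNG = np.random.RandomState(seed)
    seeds = [PRNG.randint(X.shape[0])]
    minDist = np.inf * np.ones(X.shape[0])
    for _ in range(1, K):
        minDist = np.minimum(minDist, np.sum((X - X[seeds[-1]]) ** 2, axis=1))
        seeds.append(PRNG.choice(X.shape[0], p=minDist / minDist.sum()))
    return np.asarray(seeds)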
|
daeilkim/refinery
|
refinery/bnpy/bnpy-dev/bnpy/init/FromScratchGauss.py
|
Python
|
mit
| 2,016
|
[
"Gaussian"
] |
0bbe84f0a4ee516bef29271d182e62ff4a21c48601b8834239adb4dfde7dcd70
|
#!/usr/bin/env python
"""aug2cmds converts an Augeas tree into a set of Augeas commands
Designed for use with augtool and Puppet.
"""
import __init__ as aug2cmds
import outputs
import argparse
def main():
"""Runs aug2cmds as an interactive tool"""
parser = argparse.ArgumentParser(
description="Convert file tree to Augeas commands for use in\
augtool/Puppet")
parser.add_argument('-r', '--root',
help='use ROOT as the root of the filesystem')
parser.add_argument('-l', '--lens',
help='lens to parse PATH with (e.g. Sudoers.lns)')
parser.add_argument('-y', '--yes',
action='store_const', const='yes',
help='always take default choices')
parser.add_argument('-f', '--format',
choices=['augtool', 'puppet'], default="augtool",
help='output format')
parser.add_argument('path',
help='filename relative to ROOT to parse')
parser.add_argument('augpath',
nargs='?',
help='optional Augeas path inside file to process')
args = parser.parse_args()
pathnode = aug2cmds.PathNode(args.path, root=args.root, lens=args.lens)
if args.format == "augtool":
output = outputs.Augtool()
else:
raise RuntimeError("Unknown output format")
for cmd in output.visit(pathnode, args.augpath):
print cmd
if __name__ == "__main__":
main()
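# --- Editor's note (not part of the original file) ---------------------------
# Hypothetical invocation (lens name and paths are illustrative only):
#
#   python -m aug2cmds --root /tmp/fakeroot --lens Sudoers.lns etc/sudoers
#
# which emits augtool commands reproducing the parsed tree for that file.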
|
domcleal/aug2cmds
|
aug2cmds/__main__.py
|
Python
|
bsd-3-clause
| 1,377
|
[
"VisIt"
] |
3327c84f3bc5af99d1ffefcc086a13c5a855b1ae9f9dc99af8aa87491e6fff2c
|
import logging
import numpy as np
import os
import pkg_resources
from pprint import pformat
import scipy
from scipy.ndimage.morphology import generate_binary_structure, iterate_structure
import caiman.utils.utils
from ...paths import caiman_datadir
from .utilities import dict_compare, get_file_size
class CNMFParams(object):
"""Class for setting and changing the various parameters."""
def __init__(self, fnames=None, dims=None, dxy=(1, 1),
border_pix=0, del_duplicates=False, low_rank_background=True,
memory_fact=1, n_processes=1, nb_patch=1, p_ssub=2, p_tsub=2,
remove_very_bad_comps=False, rf=None, stride=None,
check_nan=True, n_pixels_per_process=None,
k=30, alpha_snmf=100, center_psf=False, gSig=[5, 5], gSiz=None,
init_iter=2, method_init='greedy_roi', min_corr=.85,
min_pnr=20, gnb=1, normalize_init=True, options_local_NMF=None,
ring_size_factor=1.5, rolling_length=100, rolling_sum=True,
ssub=2, ssub_B=2, tsub=2,
block_size_spat=5000, num_blocks_per_run_spat=20,
block_size_temp=5000, num_blocks_per_run_temp=20,
update_background_components=True,
method_deconvolution='oasis', p=2, s_min=None,
do_merge=True, merge_thresh=0.8,
decay_time=0.4, fr=30, min_SNR=2.5, rval_thr=0.8,
N_samples_exceptionality=None, batch_update_suff_stat=False,
expected_comps=500, iters_shape=5, max_comp_update_shape=np.inf,
max_num_added=5, min_num_trial=5, minibatch_shape=100, minibatch_suff_stat=5,
n_refit=0, num_times_comp_updated=np.inf, simultaneously=False,
sniper_mode=False, test_both=False, thresh_CNN_noisy=0.5,
thresh_fitness_delta=-50, thresh_fitness_raw=None, thresh_overlap=0.5,
update_freq=200, update_num_comps=True, use_dense=True, use_peak_max=True,
only_init_patch=True, var_name_hdf5='mov', max_merge_area=None,
use_corr_img=False, params_dict={},
):
"""Class for setting the processing parameters. All parameters for CNMF, online-CNMF, quality testing,
and motion correction can be set here and then used in the various processing pipeline steps.
        The preferred way to set parameters is through the set function, where a subclass is determined and a
        dictionary is passed. The whole dictionary can also be initialized at once by passing a dictionary params_dict
        when initializing the CNMFParams object. Setting the positional arguments of CNMFParams directly is only
        supported for backwards compatibility and should be avoided if possible.
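        Example (editor's sketch, not in the original docstring; uses the
        params_dict argument from the signature above and the set function
        the previous paragraph describes):

            opts = CNMFParams(params_dict={'fr': 15, 'decay_time': 0.5})
            opts.set('temporal', {'p': 1})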
Args:
            Any parameter that is not set gets a default value, as specified
            by the default options dictionary.
DATA PARAMETERS (CNMFParams.data) #####
fnames: list[str]
list of complete paths to files that need to be processed
dims: (int, int), default: computed from fnames
dimensions of the FOV in pixels
fr: float, default: 30
imaging rate in frames per second
decay_time: float, default: 0.4
length of typical transient in seconds
dxy: (float, float)
spatial resolution of FOV in pixels per um
var_name_hdf5: str, default: 'mov'
if loading from hdf5 name of the variable to load
caiman_version: str
version of CaImAn being used
last_commit: str
hash of last commit in the caiman repo
mmap_F: list[str]
paths to F-order memory mapped files after motion correction
mmap_C: str
path to C-order memory mapped file after motion correction
PATCH PARAMS (CNMFParams.patch)######
rf: int or list or None, default: None
Half-size of patch in pixels. If None, no patches are constructed and the whole FOV is processed jointly.
If list, it should be a list of two elements corresponding to the height and width of patches
stride: int or None, default: None
Overlap between neighboring patches in pixels.
nb_patch: int, default: 1
Number of (local) background components per patch
border_pix: int, default: 0
Number of pixels to exclude around each border.
low_rank_background: bool, default: True
Whether to update the background using a low rank approximation.
If False all the nonzero elements of the background components are updated using hals
(to be used with one background per patch)
del_duplicates: bool, default: False
                Delete duplicate components in the overlapping regions between neighboring patches. If False,
then merging is used.
only_init: bool, default: True
whether to run only the initialization
p_patch: int, default: 0
order of AR dynamics when processing within a patch
skip_refinement: bool, default: False
Whether to skip refinement of components (deprecated?)
remove_very_bad_comps: bool, default: True
Whether to remove (very) bad quality components during patch processing
p_ssub: float, default: 2
Spatial downsampling factor
p_tsub: float, default: 2
Temporal downsampling factor
memory_fact: float, default: 1
unitless number for increasing the amount of available memory
n_processes: int
Number of processes used for processing patches in parallel
in_memory: bool, default: True
Whether to load patches in memory
PRE-PROCESS PARAMS (CNMFParams.preprocess) #############
sn: np.array or None, default: None
noise level for each pixel
noise_range: [float, float], default: [.25, .5]
range of normalized frequencies over which to compute the PSD for noise determination
noise_method: 'mean'|'median'|'logmexp', default: 'mean'
PSD averaging method for computing the noise std
max_num_samples_fft: int, default: 3*1024
Chunk size for computing the PSD of the data (for memory considerations)
n_pixels_per_process: int, default: 1000
Number of pixels to be allocated to each process
            compute_g: bool, default: False
whether to estimate global time constant
p: int, default: 2
order of AR indicator dynamics
lags: int, default: 5
number of lags to be considered for time constant estimation
include_noise: bool, default: False
flag for using noise values when estimating g
pixels: list, default: None
pixels to be excluded due to saturation
check_nan: bool, default: True
whether to check for NaNs
INIT PARAMS (CNMFParams.init)###############
K: int, default: 30
number of components to be found (per patch or whole FOV depending on whether rf=None)
            SC_kernel: {'heat', 'cos', 'binary'}, default: 'heat'
kernel for graph affinity matrix
SC_sigma: float, default: 1
variance for SC kernel
SC_thr: float, default: 0,
threshold for affinity matrix
SC_normalize: bool, default: True
standardize entries prior to computing the affinity matrix
SC_use_NN: bool, default: False
sparsify affinity matrix by using only nearest neighbors
SC_nnn: int, default: 20
number of nearest neighbors to use
gSig: [int, int], default: [5, 5]
radius of average neurons (in pixels)
gSiz: [int, int], default: [int(round((x * 2) + 1)) for x in gSig],
half-size of bounding box for each neuron
center_psf: bool, default: False
whether to use 1p data processing mode. Set to true for 1p
ssub: float, default: 2
spatial downsampling factor
tsub: float, default: 2
temporal downsampling factor
nb: int, default: 1
number of background components
lambda_gnmf: float, default: 1.
regularization weight for graph NMF
maxIter: int, default: 5
number of HALS iterations during initialization
method_init: 'greedy_roi'|'corr_pnr'|'sparse_NMF'|'local_NMF' default: 'greedy_roi'
initialization method. use 'corr_pnr' for 1p processing and 'sparse_NMF' for dendritic processing.
min_corr: float, default: 0.85
minimum value of correlation image for determining a candidate component during corr_pnr
min_pnr: float, default: 20
minimum value of psnr image for determining a candidate component during corr_pnr
seed_method: str {'auto', 'manual', 'semi'}
methods for choosing seed pixels during greedy_roi or corr_pnr initialization
                'semi' detects the number of components automatically and allows adding more manually
if running as notebook 'semi' and 'manual' require a backend that does not
inline figures, e.g. %matplotlib tk
ring_size_factor: float, default: 1.5
radius of ring (*gSig) for computing background during corr_pnr
ssub_B: float, default: 2
downsampling factor for background during corr_pnr
init_iter: int, default: 2
number of iterations during corr_pnr (1p) initialization
nIter: int, default: 5
number of rank-1 refinement iterations during greedy_roi initialization
rolling_sum: bool, default: True
use rolling sum (as opposed to full sum) for determining candidate centroids during greedy_roi
rolling_length: int, default: 100
width of rolling window for rolling sum option
kernel: np.array or None, default: None
user specified template for greedyROI
max_iter_snmf : int, default: 500
maximum number of iterations for sparse NMF initialization
alpha_snmf: float, default: 100
sparse NMF sparsity regularization weight
sigma_smooth_snmf : (float, float, float), default: (.5,.5,.5)
std of Gaussian kernel for smoothing data in sparse_NMF
perc_baseline_snmf: float, default: 20
percentile to be removed from the data in sparse_NMF prior to decomposition
normalize_init: bool, default: True
whether to equalize the movies during initialization
options_local_NMF: dict
dictionary with parameters to pass to local_NMF initializer
SPATIAL PARAMS (CNMFParams.spatial) ##########
method_exp: 'dilate'|'ellipse', default: 'dilate'
method for expanding footprint of spatial components
dist: float, default: 3
expansion factor of ellipse
expandCore: morphological element, default: None(?)
morphological element for expanding footprints under dilate
nb: int, default: 1
number of global background components
n_pixels_per_process: int, default: 1000
number of pixels to be processed by each worker
thr_method: 'nrg'|'max', default: 'nrg'
thresholding method
maxthr: float, default: 0.1
Max threshold
nrgthr: float, default: 0.9999
Energy threshold
extract_cc: bool, default: True
whether to extract connected components during thresholding
(might want to turn to False for dendritic imaging)
medw: (int, int) default: None
window of median filter (set to (3,)*len(dims) in cnmf.fit)
se: np.array or None, default: None
Morphological closing structuring element (set to np.ones((3,)*len(dims), dtype=np.uint8) in cnmf.fit)
ss: np.array or None, default: None
Binary element for determining connectivity (set to np.ones((3,)*len(dims), dtype=np.uint8) in cnmf.fit)
update_background_components: bool, default: True
whether to update the spatial background components
method_ls: 'lasso_lars'|'nnls_L0', default: 'lasso_lars'
'nnls_L0'. Nonnegative least square with L0 penalty
'lasso_lars' lasso lars function from scikit learn
block_size : int, default: 5000
Number of pixels to process at the same time for dot product. Reduce if you face memory problems
num_blocks_per_run: int, default: 20
Parallelization of A'*Y operation
normalize_yyt_one: bool, default: True
Whether to normalize the C and A matrices so that diag(C*C.T) = 1 during update spatial
TEMPORAL PARAMS (CNMFParams.temporal)###########
ITER: int, default: 2
block coordinate descent iterations
            method_deconvolution: 'oasis'|'cvx'|'cvxpy', default: 'oasis'
                method for solving the constrained deconvolution problem ('oasis', 'cvx' or 'cvxpy');
                if method is cvxpy, a primary and a secondary solver are used (the secondary when the
                problem is infeasible for an approximate solution)
solvers: 'ECOS'|'SCS', default: ['ECOS', 'SCS']
solvers to be used with cvxpy, can be 'ECOS','SCS' or 'CVXOPT'
p: 0|1|2, default: 2
order of AR indicator dynamics
memory_efficient: False
bas_nonneg: bool, default: True
whether to set a non-negative baseline (otherwise b >= min(y))
noise_range: [float, float], default: [.25, .5]
range of normalized frequencies over which to compute the PSD for noise determination
noise_method: 'mean'|'median'|'logmexp', default: 'mean'
PSD averaging method for computing the noise std
lags: int, default: 5
number of autocovariance lags to be considered for time constant estimation
optimize_g: bool, default: False
flag for optimizing time constants
fudge_factor: float (close but smaller than 1) default: .96
bias correction factor for discrete time constants
nb: int, default: 1
number of global background components
verbosity: bool, default: False
whether to be verbose
block_size : int, default: 5000
Number of pixels to process at the same time for dot product. Reduce if you face memory problems
num_blocks_per_run: int, default: 20
Parallelization of A'*Y operation
s_min: float or None, default: None
Minimum spike threshold amplitude (computed in the code if used).
MERGE PARAMS (CNMFParams.merge)#####
do_merge: bool, default: True
Whether or not to merge
thr: float, default: 0.8
Trace correlation threshold for merging two components.
merge_parallel: bool, default: False
Perform merging in parallel
max_merge_area: int or None, default: None
maximum area (in pixels) of merged components, used to determine whether to merge components during fitting process
QUALITY EVALUATION PARAMETERS (CNMFParams.quality)###########
min_SNR: float, default: 2.5
trace SNR threshold. Traces with SNR above this will get accepted
SNR_lowest: float, default: 0.5
minimum required trace SNR. Traces with SNR below this will get rejected
rval_thr: float, default: 0.8
space correlation threshold. Components with correlation higher than this will get accepted
rval_lowest: float, default: -1
minimum required space correlation. Components with correlation below this will get rejected
use_cnn: bool, default: True
flag for using the CNN classifier.
min_cnn_thr: float, default: 0.9
CNN classifier threshold. Components with score higher than this will get accepted
cnn_lowest: float, default: 0.1
minimum required CNN threshold. Components with score lower than this will get rejected.
            gSig_range: list of integers, default: None
                gSig scale values for the CNN classifier. If not None, multiple values are tested in the CNN classifier.
ONLINE CNMF (ONACID) PARAMETERS (CNMFParams.online)#####
N_samples_exceptionality: int, default: np.ceil(decay_time*fr),
Number of frames over which trace SNR is computed (usually length of a typical transient)
batch_update_suff_stat: bool, default: False
Whether to update sufficient statistics in batch mode
ds_factor: int, default: 1,
spatial downsampling factor for faster processing (if > 1)
dist_shape_update: bool, default: False,
update shapes in a distributed fashion
epochs: int, default: 1,
number of times to go over data
expected_comps: int, default: 500
number of expected components (for memory allocation purposes)
full_XXt: bool, default: False
save the full residual sufficient statistic matrix for updating W in 1p.
If set to False, a list of submatrices is saved (typically faster).
init_batch: int, default: 200,
length of mini batch used for initialization
init_method: 'bare'|'cnmf'|'seeded', default: 'bare',
initialization method
iters_shape: int, default: 5
Number of block-coordinate decent iterations for each shape update
max_comp_update_shape: int, default: np.inf
Maximum number of spatial components to be updated at each time
max_num_added: int, default: 5
Maximum number of new components to be added in each frame
max_shifts_online: int, default: 10,
Maximum shifts for motion correction during online processing
min_SNR: float, default: 2.5
Trace SNR threshold for accepting a new component
min_num_trial: int, default: 5
Number of new candidate components considered for each frame
minibatch_shape: int, default: 100
Number of frames stored in rolling buffer
minibatch_suff_stat: int, default: 5
mini batch size for updating sufficient statistics
motion_correct: bool, default: True
Whether to perform motion correction during online processing
movie_name_online: str, default: 'online_movie.mp4'
Name of saved movie (appended in the data directory)
normalize: bool, default: False
Whether to normalize each frame prior to online processing
n_refit: int, default: 0
Number of additional iterations for computing traces
num_times_comp_updated: int, default: np.inf
opencv_codec: str, default: 'H264'
FourCC video codec for saving movie. Check http://www.fourcc.org/codecs.php
path_to_model: str, default: os.path.join(caiman_datadir(), 'model', 'cnn_model_online.h5')
Path to online CNN classifier
rval_thr: float, default: 0.8
space correlation threshold for accepting a new component
save_online_movie: bool, default: False
Whether to save the results movie
show_movie: bool, default: False
Whether to display movie of online processing
simultaneously: bool, default: False
Whether to demix and deconvolve simultaneously
sniper_mode: bool, default: False
Whether to use the online CNN classifier for screening candidate components (otherwise space
correlation is used)
test_both: bool, default: False
Whether to use both the CNN and space correlation for screening new components
thresh_CNN_noisy: float, default: 0.5
Threshold for the online CNN classifier
thresh_fitness_delta: float (negative)
Derivative test for detecting traces
thresh_fitness_raw: float (negative), default: computed from min_SNR
Threshold value for testing trace SNR
thresh_overlap: float, default: 0.5
Intersection-over-Union space overlap threshold for screening new components
update_freq: int, default: 200
Update each shape at least once every X frames when in distributed mode
update_num_comps: bool, default: True
Whether to search for new components
use_dense: bool, default: True
Whether to store and represent A and b as a dense matrix
use_peak_max: bool, default: True
Whether to find candidate centroids using skimage's find local peaks function
MOTION CORRECTION PARAMETERS (CNMFParams.motion)####
border_nan: bool or str, default: 'copy'
flag for allowing NaN in the boundaries. True allows NaN, whereas 'copy' copies the value of the
nearest data point.
gSig_filt: int or None, default: None
size of kernel for high pass spatial filtering in 1p data. If None no spatial filtering is performed
is3D: bool, default: False
flag for 3D recordings for motion correction
max_deviation_rigid: int, default: 3
maximum deviation in pixels between rigid shifts and shifts of individual patches
max_shifts: (int, int), default: (6,6)
maximum shifts per dimension in pixels.
min_mov: float or None, default: None
minimum value of movie. If None it get computed.
niter_rig: int, default: 1
number of iterations rigid motion correction.
nonneg_movie: bool, default: True
flag for producing a non-negative movie.
num_frames_split: int, default: 80
split movie every x frames for parallel processing
num_splits_to_process_els, default: [7, None] (do not modify)
num_splits_to_process_rig, default: None (do not modify)
overlaps: (int, int), default: (32, 32)
overlap between patches in pixels in pw-rigid motion correction.
pw_rigid: bool, default: False
flag for performing pw-rigid motion correction.
shifts_opencv: bool, default: True
flag for applying shifts using cubic interpolation (otherwise FFT)
splits_els: int, default: 14
number of splits across time for pw-rigid registration
splits_rig: int, default: 14
number of splits across time for rigid registration
strides: (int, int), default: (96, 96)
how often to start a new patch in pw-rigid registration. Size of each patch will be strides + overlaps
upsample_factor_grid" int, default: 4
motion field upsampling factor during FFT shifts.
use_cuda: bool, default: False
flag for using a GPU.
indices: tuple(slice), default: (slice(None), slice(None))
Use that to apply motion correction only on a part of the FOV
RING CNN PARAMETERS (CNMFParams.ring_CNN)
n_channels: int, default: 2
Number of "ring" kernels
use_bias: bool, default: False
Flag for using bias in the convolutions
use_add: bool, default: False
Flag for using an additive layer
pct: float between 0 and 1, default: 0.01
Quantile used during training with quantile loss function
patience: int, default: 3
Number of epochs to wait before early stopping
max_epochs: int, default: 100
Maximum number of epochs to be used during training
width: int, default: 5
Width of "ring" kernel
loss_fn: str, default: 'pct'
Loss function specification ('pct' for quantile loss function,
'mse' for mean squared error)
lr: float, default: 1e-3
(initial) learning rate
lr_scheduler: function, default: None
Learning rate scheduler function
path_to_model: str, default: None
Path to saved weights (if training then path to saved model weights)
remove_activity: bool, default: False
Flag for removing activity of last frame prior to background extraction
reuse_model: bool, default: False
Flag for reusing an already trained model (saved in path to model)
"""
self.data = {
'fnames': fnames,
'dims': dims,
'fr': fr,
'decay_time': decay_time,
'dxy': dxy,
'var_name_hdf5': var_name_hdf5,
'caiman_version': pkg_resources.get_distribution('caiman').version,
'last_commit': None,
'mmap_F': None,
'mmap_C': None
}
self.patch = {
'border_pix': border_pix,
'del_duplicates': del_duplicates,
'in_memory': True,
'low_rank_background': low_rank_background,
'memory_fact': memory_fact,
'n_processes': n_processes,
'nb_patch': nb_patch,
'only_init': only_init_patch,
'p_patch': 0, # AR order within patch
'remove_very_bad_comps': remove_very_bad_comps,
'rf': rf,
'skip_refinement': False,
'p_ssub': p_ssub, # spatial downsampling factor
'stride': stride,
'p_tsub': p_tsub, # temporal downsampling factor
}
self.preprocess = {
'check_nan': check_nan,
'compute_g': False, # flag for estimating global time constant
'include_noise': False, # flag for using noise values when estimating g
# number of autocovariance lags to be considered for time constant estimation
'lags': 5,
'max_num_samples_fft': 3 * 1024,
'n_pixels_per_process': n_pixels_per_process,
'noise_method': 'mean', # averaging method ('mean','median','logmexp')
'noise_range': [0.25, 0.5], # range of normalized frequencies over which to average
'p': p, # order of AR indicator dynamics
'pixels': None, # pixels to be excluded due to saturation
'sn': None, # noise level for each pixel
}
self.init = {
'K': k, # number of components,
'SC_kernel': 'heat', # kernel for graph affinity matrix
'SC_sigma' : 1, # std for SC kernel
'SC_thr': 0, # threshold for affinity matrix
'SC_normalize': True, # standardize entries prior to
# computing affinity matrix
'SC_use_NN': False, # sparsify affinity matrix by using
# only nearest neighbors
'SC_nnn': 20, # number of nearest neighbors to use
'alpha_snmf': alpha_snmf,
'center_psf': center_psf,
'gSig': gSig,
# size of bounding box
'gSiz': gSiz,
'init_iter': init_iter,
'kernel': None, # user specified template for greedyROI
'lambda_gnmf' :1, # regularization weight for graph NMF
'maxIter': 5, # number of HALS iterations
'max_iter_snmf': 500,
'method_init': method_init, # can be greedy_roi, corr_pnr sparse_nmf, local_NMF
'min_corr': min_corr,
'min_pnr': min_pnr,
'nIter': 5, # number of refinement iterations
'nb': gnb, # number of global background components
# whether to pixelwise equalize the movies during initialization
'normalize_init': normalize_init,
# dictionary with parameters to pass to local_NMF initializaer
'options_local_NMF': options_local_NMF,
'perc_baseline_snmf': 20,
'ring_size_factor': ring_size_factor,
'rolling_length': rolling_length,
'rolling_sum': rolling_sum,
'seed_method': 'auto',
'sigma_smooth_snmf': (.5, .5, .5),
'ssub': ssub, # spatial downsampling factor
'ssub_B': ssub_B,
'tsub': tsub, # temporal downsampling factor
}
self.spatial = {
'block_size_spat': block_size_spat, # number of pixels to parallelize residual computation ** DECREASE IF MEMORY ISSUES
'dist': 3, # expansion factor of ellipse
'expandCore': iterate_structure(generate_binary_structure(2, 1), 2).astype(int),
# Flag to extract connected components (might want to turn to False for dendritic imaging)
'extract_cc': True,
'maxthr': 0.1, # Max threshold
'medw': None, # window of median filter
# method for determining footprint of spatial components ('ellipse' or 'dilate')
'method_exp': 'dilate',
# 'nnls_L0'. Nonnegative least square with L0 penalty
# 'lasso_lars' lasso lars function from scikit learn
'method_ls': 'lasso_lars',
# number of pixels to be processed by each worker
'n_pixels_per_process': n_pixels_per_process,
'nb': gnb, # number of background components
'normalize_yyt_one': True,
'nrgthr': 0.9999, # Energy threshold
'num_blocks_per_run_spat': num_blocks_per_run_spat, # number of process to parallelize residual computation ** DECREASE IF MEMORY ISSUES
'se': np.ones((3, 3), dtype='uint8'), # Morphological closing structuring element
'ss': np.ones((3, 3), dtype='uint8'), # Binary element for determining connectivity
'thr_method': 'nrg', # Method of thresholding ('max' or 'nrg')
# whether to update the background components in the spatial phase
'update_background_components': update_background_components,
}
self.temporal = {
'ITER': 2, # block coordinate descent iterations
# flag for setting non-negative baseline (otherwise b >= min(y))
'bas_nonneg': False,
# number of pixels to process at the same time for dot product. Make it
# smaller if memory problems
'block_size_temp': block_size_temp, # number of pixels to parallelize residual computation ** DECREASE IF MEMORY ISSUES
# bias correction factor (between 0 and 1, close to 1)
'fudge_factor': .96,
# number of autocovariance lags to be considered for time constant estimation
'lags': 5,
'optimize_g': False, # flag for optimizing time constants
'memory_efficient': False,
# method for solving the constrained deconvolution problem ('oasis','cvx' or 'cvxpy')
# if method cvxpy, primary and secondary (if problem unfeasible for approx
# solution) solvers to be used with cvxpy, can be 'ECOS','SCS' or 'CVXOPT'
'method_deconvolution': method_deconvolution, # 'cvxpy', # 'oasis'
'nb': gnb, # number of background components
'noise_method': 'mean', # averaging method ('mean','median','logmexp')
'noise_range': [.25, .5], # range of normalized frequencies over which to average
'num_blocks_per_run_temp': num_blocks_per_run_temp, # number of process to parallelize residual computation ** DECREASE IF MEMORY ISSUES
'p': p, # order of AR indicator dynamics
's_min': s_min, # minimum spike threshold
'solvers': ['ECOS', 'SCS'],
'verbosity': False,
}
self.merging = {
'do_merge': do_merge,
'merge_thr': merge_thresh,
'merge_parallel': False,
'max_merge_area': max_merge_area
}
self.quality = {
'SNR_lowest': 0.5, # minimum accepted SNR value
'cnn_lowest': 0.1, # minimum accepted value for CNN classifier
'gSig_range': None, # range for gSig scale for CNN classifier
'min_SNR': min_SNR, # transient SNR threshold
'min_cnn_thr': 0.9, # threshold for CNN classifier
'rval_lowest': -1, # minimum accepted space correlation
'rval_thr': rval_thr, # space correlation threshold
'use_cnn': True, # use CNN based classifier
'use_ecc': False, # flag for eccentricity based filtering
'max_ecc': 3
}
self.online = {
'N_samples_exceptionality': N_samples_exceptionality, # timesteps to compute SNR
'batch_update_suff_stat': batch_update_suff_stat,
'dist_shape_update': False, # update shapes in a distributed way
'ds_factor': 1, # spatial downsampling for faster processing
'epochs': 1, # number of epochs
'expected_comps': expected_comps, # number of expected components
'full_XXt': False, # store entire XXt matrix (as opposed to a list of sub-matrices)
'init_batch': 200, # length of mini batch for initialization
'init_method': 'bare', # initialization method for first batch,
'iters_shape': iters_shape, # number of block-CD iterations
'max_comp_update_shape': max_comp_update_shape,
'max_num_added': max_num_added, # maximum number of new components for each frame
'max_shifts_online': 10, # maximum shifts during motion correction
'min_SNR': min_SNR, # minimum SNR for accepting a new trace
'min_num_trial': min_num_trial, # number of new candidate components considered for each frame
'minibatch_shape': minibatch_shape, # number of frames in each minibatch
'minibatch_suff_stat': minibatch_suff_stat,
'motion_correct': True, # flag for motion correction
'movie_name_online': 'online_movie.mp4', # filename of saved movie (appended to directory where data is located)
'normalize': False, # normalize frame
'n_refit': n_refit, # Additional iterations to simultaneously refit
# path to CNN model for testing new comps
'num_times_comp_updated': num_times_comp_updated,
'opencv_codec': 'H264', # FourCC video codec for saving movie. Check http://www.fourcc.org/codecs.php
'path_to_model': os.path.join(caiman_datadir(), 'model',
'cnn_model_online.h5'),
'ring_CNN': False, # flag for using a ring CNN background model
'rval_thr': rval_thr, # space correlation threshold
'save_online_movie': False, # flag for saving online movie
'show_movie': False, # display movie online
'simultaneously': simultaneously, # demix and deconvolve simultaneously
'sniper_mode': sniper_mode, # flag for using CNN
'stop_detection': False, # flag for stop detecting new neurons at the last epoch
'test_both': test_both, # flag for using both CNN and space correlation
'thresh_CNN_noisy': thresh_CNN_noisy, # threshold for online CNN classifier
'thresh_fitness_delta': thresh_fitness_delta,
'thresh_fitness_raw': thresh_fitness_raw, # threshold for trace SNR (computed below)
'thresh_overlap': thresh_overlap,
'update_freq': update_freq, # update every shape at least once every update_freq steps
'update_num_comps': update_num_comps, # flag for searching for new components
'use_corr_img': use_corr_img, # flag for using correlation image to detect new components
'use_dense': use_dense, # flag for representation and storing of A and b
'use_peak_max': use_peak_max, # flag for finding candidate centroids
'W_update_factor': 1, # update W less often than shapes by a given factor
}
self.motion = {
'border_nan': 'copy', # flag for allowing NaN in the boundaries
'gSig_filt': None, # size of kernel for high pass spatial filtering in 1p data
'is3D': False, # flag for 3D recordings for motion correction
'max_deviation_rigid': 3, # maximum deviation between rigid and non-rigid
'max_shifts': (6, 6), # maximum shifts per dimension (in pixels)
'min_mov': None, # minimum value of movie
'niter_rig': 1, # number of iterations rigid motion correction
'nonneg_movie': True, # flag for producing a non-negative movie
'num_frames_split': 80, # split across time every x frames
'num_splits_to_process_els': None, # DO NOT MODIFY
'num_splits_to_process_rig': None, # DO NOT MODIFY
'overlaps': (32, 32), # overlap between patches in pw-rigid motion correction
'pw_rigid': False, # flag for performing pw-rigid motion correction
'shifts_opencv': True, # flag for applying shifts using cubic interpolation (otherwise FFT)
'splits_els': 14, # number of splits across time for pw-rigid registration
'splits_rig': 14, # number of splits across time for rigid registration
'strides': (96, 96), # how often to start a new patch in pw-rigid registration
'upsample_factor_grid': 4, # motion field upsampling factor during FFT shifts
'use_cuda': False, # flag for using a GPU
'indices': (slice(None), slice(None)) # part of FOV to be corrected
}
self.ring_CNN = {
'n_channels' : 2, # number of "ring" kernels
'use_bias' : False, # use bias in the convolutions
'use_add' : False, # use an additive layer
'pct' : 0.01, # quantile loss specification
'patience' : 3, # patience for early stopping
'max_epochs': 100, # maximum number of epochs
'width': 5, # width of "ring" kernel
'loss_fn': 'pct', # loss function
'lr': 1e-3, # (initial) learning rate
'lr_scheduler': None, # learning rate scheduler function
'path_to_model': None, # path to saved weights
'remove_activity': False, # remove activity of last frame prior to background extraction
'reuse_model': False # reuse an already trained model
}
self.change_params(params_dict)
def check_consistency(self):
""" Populates the params object with some dataset dependent values
and ensures that certain constraints are satisfied.
"""
self.data['last_commit'] = '-'.join(caiman.utils.utils.get_caiman_version())
if self.data['dims'] is None and self.data['fnames'] is not None:
self.data['dims'] = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[0]
if self.data['fnames'] is not None:
if isinstance(self.data['fnames'], str):
self.data['fnames'] = [self.data['fnames']]
T = get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[1]
if len(self.data['fnames']) > 1:
T = T[0]
num_splits = max(T//max(self.motion['num_frames_split'], 10), 1)
self.motion['splits_els'] = num_splits
self.motion['splits_rig'] = num_splits
if isinstance(self.data['fnames'][0],tuple):
self.online['movie_name_online'] = os.path.join(os.path.dirname(self.data['fnames'][0][0]), self.online['movie_name_online'])
else:
self.online['movie_name_online'] = os.path.join(os.path.dirname(self.data['fnames'][0]), self.online['movie_name_online'])
if self.online['N_samples_exceptionality'] is None:
self.online['N_samples_exceptionality'] = np.ceil(self.data['fr'] * self.data['decay_time']).astype('int')
if self.online['thresh_fitness_raw'] is None:
self.online['thresh_fitness_raw'] = scipy.special.log_ndtr(
-self.online['min_SNR']) * self.online['N_samples_exceptionality']
self.online['max_shifts_online'] = (np.array(self.online['max_shifts_online']) / self.online['ds_factor']).astype(int)
if self.init['gSig'] is None:
self.init['gSig'] = [-1, -1]
if self.init['gSiz'] is None:
self.init['gSiz'] = [2*gs + 1 for gs in self.init['gSig']]
self.init['gSiz'] = tuple([gs + 1 if gs % 2 == 0 else gs for gs in self.init['gSiz']])
if self.patch['rf'] is not None:
if np.any(np.array(self.patch['rf']) <= self.init['gSiz'][0]):
logging.warning("Changing rf from {0} to {1} ".format(self.patch['rf'], 2*self.init['gSiz'][0]) +
"because the constraint rf > gSiz was not satisfied.")
# if self.motion['gSig_filt'] is None:
# self.motion['gSig_filt'] = self.init['gSig']
if self.init['nb'] <= 0 and (self.patch['nb_patch'] != self.init['nb'] or
self.patch['low_rank_background'] is not None):
logging.warning("gnb={0}, hence setting keys nb_patch ".format(self.init['nb']) +
"and low_rank_background in group patch automatically.")
self.set('patch', {'nb_patch': self.init['nb'], 'low_rank_background': None})
if self.init['nb'] == -1 and self.spatial['update_background_components']:
logging.warning("gnb=-1, hence setting key update_background_components " +
"in group spatial automatically to False.")
self.set('spatial', {'update_background_components': False})
if self.init['method_init'] == 'corr_pnr' and self.init['ring_size_factor'] is not None \
and self.init['normalize_init']:
logging.warning("using CNMF-E's ringmodel for background hence setting key " +
"normalize_init in group init automatically to False.")
self.set('init', {'normalize_init': False})
if self.motion['is3D']:
for a in ('indices', 'max_shifts', 'strides', 'overlaps'):
if len(self.motion[a]) != 3:
if self.motion[a][0] == self.motion[a][1]:
self.motion[a] = (self.motion[a][0],) * 3
logging.warning("is3D=True, hence setting key " + a +
" automatically to " + str(self.motion[a]))
else:
raise ValueError(a + ' has to be a tuple of length 3 for volumetric 3D data')
for key in ('max_num_added', 'min_num_trial'):
if (self.online[key] == 0 and self.online['update_num_comps']):
self.set('online', {'update_num_comps': False})
logging.warning(key + "=0, hence setting key update_num_comps " +
"in group online automatically to False.")
def set(self, group, val_dict, set_if_not_exists=False, verbose=False):
""" Add key-value pairs to a group. Existing key-value pairs will be overwritten
if specified in val_dict, but not deleted.
Args:
group: The name of the group.
val_dict: A dictionary with key-value pairs to be set for the group.
set_if_not_exists: Whether to set a key-value pair in a group if the key does not currently exist in the group.
"""
if not hasattr(self, group):
raise KeyError('No group in CNMFParams named {0}'.format(group))
d = getattr(self, group)
for k, v in val_dict.items():
if k not in d and not set_if_not_exists:
if verbose:
logging.warning(
"NOT setting value of key {0} in group {1}, because no prior key existed...".format(k, group))
else:
if k in d and np.any(d[k] != v):
logging.info(
"Changing key {0} in group {1} from {2} to {3}".format(k, group, d[k], v))
d[k] = v
def get(self, group, key):
""" Get a value for a given group and key. Raises an exception if no such group/key combination exists.
Args:
group: The name of the group.
key: The key for the property in the group of interest.
Returns: The value for the group/key combination.
"""
if not hasattr(self, group):
raise KeyError('No group in CNMFParams named {0}'.format(group))
d = getattr(self, group)
if key not in d:
raise KeyError('No key {0} in group {1}'.format(key, group))
return d[key]
def get_group(self, group):
""" Get the dictionary of key-value pairs for a group.
Args:
group: The name of the group.
"""
if not hasattr(self, group):
raise KeyError('No group in CNMFParams named {0}'.format(group))
return getattr(self, group)
def __eq__(self, other):
if not isinstance(other, CNMFParams):
return False
parent_dict1 = self.to_dict()
parent_dict2 = other.to_dict()
key_diff = np.setdiff1d(list(parent_dict1.keys()), list(parent_dict2.keys()))
if len(key_diff) > 0:
return False
for k1, child_dict1 in parent_dict1.items():
child_dict2 = parent_dict2[k1]
added, removed, modified, same = dict_compare(child_dict1, child_dict2)
if len(added) != 0 or len(removed) != 0 or len(modified) != 0 or len(same) != len(child_dict1):
return False
return True
def to_dict(self):
"""Returns the params class as a dictionary with subdictionaries for each
catergory."""
return {'data': self.data, 'spatial_params': self.spatial, 'temporal_params': self.temporal,
'init_params': self.init, 'preprocess_params': self.preprocess,
'patch_params': self.patch, 'online': self.online, 'quality': self.quality,
'merging': self.merging, 'motion': self.motion, 'ring_CNN': self.ring_CNN
}
def __repr__(self):
formatted_outputs = [
'{}:\n\n{}'.format(group_name, pformat(group_dict))
for group_name, group_dict in self.to_dict().items()
]
return 'CNMFParams:\n\n' + '\n\n'.join(formatted_outputs)
def change_params(self, params_dict, verbose=False):
""" Method for updating the params object by providing a single dictionary.
For each key in the provided dictionary the method will search in all
subdictionaries and will update the value if it finds a match.
Args:
params_dict: dictionary with parameters to be changed and new values
verbose: bool (False). Print message for all keys
"""
for gr in list(self.__dict__.keys()):
self.set(gr, params_dict, verbose=verbose)
for k, v in params_dict.items():
flag = True
for gr in list(self.__dict__.keys()):
d = getattr(self, gr)
if k in d:
flag = False
if flag:
logging.warning('No parameter {0} found!'.format(k))
self.check_consistency()
return self
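# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving the grouped-parameter API above, assuming the
# caiman package and its dependencies are importable. All values below are
# hypothetical; no input file is given so check_consistency skips file probing.
if __name__ == "__main__":
    demo_opts = CNMFParams(params_dict={
        'fr': 30,           # imaging rate (Hz)
        'decay_time': 0.4,  # typical transient decay (s)
        'p': 1,             # AR order for deconvolution
    })
    print(demo_opts.get('temporal', 'p'))         # -> 1 (propagated to the group)
    demo_opts.set('quality', {'min_SNR': 3.0})    # overwrite one key in one group
    demo_opts.change_params({'merge_thr': 0.85})  # searched across all groups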
|
simonsfoundation/CaImAn
|
caiman/source_extraction/cnmf/params.py
|
Python
|
gpl-2.0
| 50,491
|
[
"Gaussian",
"NEURON"
] |
d51e289f10fc66b911f8c46cae58debb2d5ff03daeac62a312a8ef35e3156612
|
"""
Pairwise distance functions between time series in the input space
==================================================================
They all have the following prototype:
function(bcsc1, bcsc2, **kwargs)
"""
import numpy as np
from numpy.linalg import slogdet
from scipy.linalg import solve, eigh
from .utils import compute_autocov, compute_autocorr
from .global_align import tga_dissimilarity
def linear_diff_means(bcsc1, bcsc2):
""" Return the squared Euclidian-distance between time-series' means
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
Returns
-------
ddm: double,
squared Euclidean distance between the means of the time series
"""
m1 = np.asarray(bcsc1.mean(axis=1)).squeeze()
m2 = np.asarray(bcsc2.mean(axis=1)).squeeze()
ddm = ((m2 - m1) ** 2).sum()
return ddm
def linear_mean_diffs(bcsc1, bcsc2):
""" Return the mean of Euclidian-distances between time-series
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
Returns
-------
dmd: double,
mean of the squared Euclidean distances between the time series
"""
T = bcsc1.shape[1]
assert T == bcsc2.shape[1], "the series should be of same duration"
dmd = 1.0 / T * ((bcsc2 - bcsc1).data ** 2).sum()
return dmd
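# Illustrative sketch (not part of the original module): the two distances
# above behave differently on a time-shifted copy of the same signal.
# Assuming numpy and scipy are available:
#
#     import numpy as np
#     from scipy.sparse import csc_matrix
#     x = np.random.rand(4, 100)               # 4 variables, 100 time steps
#     b1 = csc_matrix(x)
#     b2 = csc_matrix(np.roll(x, 5, axis=1))   # same frames, shifted in time
#     linear_diff_means(b1, b2)   # ~0: the mean is invariant to the shift
#     linear_mean_diffs(b1, b2)   # > 0: frame-wise distances see the shift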
def linear_allpairs(bcsc1, bcsc2):
""" Return the mean of all pairwise dot products (*similarity*) between two-time series
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
Returns
-------
sim_ap: double,
mean of all pairwise frame dot products
Notes
-----
It's a *similarity*, not a distance!
"""
sim_ap = (bcsc1.T * bcsc2).mean() # * sparse matrices == matrix product!!!
return sim_ap
def linear_hsac(bcsc1, bcsc2, tau=1, mntype=0):
""" Return the distance between the auto-covariance matrices of two time-series
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
tau: int, optional, default: 1
lag parameter
mntype: int (default: 0), determines matrix norm used:
0: Frobenius (HS) norm
1: largest eigen-value
Returns
-------
dhsac: double,
distance between the auto-covariance matrices.
"""
d = bcsc1.shape[0] # d: number of vars
# autocovariances
acv21 = compute_autocov(bcsc2, tau=tau)
acv21 -= compute_autocov(bcsc1, tau=tau)
# compute the distance
if mntype == 0:
# get the squared Frobenius norm of the difference between auto-covariances
dhsac = np.core.add.reduce((acv21 * acv21).ravel()) # from numpy.linalg.norm
elif mntype == 1:
# get the largest eigenvalue of the difference between auto-covariances
dhsac = eigh(acv21, eigvals_only=True, eigvals=(d - 1, d - 1))[0]
else:
raise ValueError("Invalid matrix norm type ({})".format(mntype))
return dhsac
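# Illustrative note (not part of the original module): with mntype=0 the value
# above is the squared Frobenius norm ||C_tau(y2) - C_tau(y1)||_F^2 of the
# difference of the lag-tau auto-covariances returned by compute_autocov; with
# mntype=1 it is that difference's largest eigenvalue. Reusing b1, b2 from the
# earlier sketch:
#
#     linear_hsac(b1, b2, tau=1, mntype=0)   # squared Frobenius norm
#     linear_hsac(b1, b2, tau=1, mntype=1)   # largest eigenvalue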
def linear_nhsac(bcsc1, bcsc2, tau=1, regul=1e-3, check_regul=False, mntype=0):
""" Return the difference between auto-covariances of the time-series,
normalized by the overall variance
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
tau: int, optional, default: 1
lag parameter
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
if < 0, then set regul to be 50% of the largest singular value
check_regul: boolean, optional, default: False,
if True, then check that the regul parameter is smaller
than the largest eigen-values of the covariance matrices
mntype: int (default: 0), determines matrix norm used:
0: Frobenius (HS) norm
1: largest eigen-value
Returns
-------
dnhsac: double,
variance-normalized distance between the auto-covariance matrices.
"""
d = bcsc1.shape[0] # d: number of vars
T1 = bcsc1.shape[1]
T2 = bcsc2.shape[1]
# autocovariances
acv21 = compute_autocov(bcsc2, tau=tau)
acv21 -= compute_autocov(bcsc1, tau=tau)
# normalize by overall frame covariance matrix
C = np.cov(np.hstack([bcsc1.toarray(), bcsc2.toarray()]))
# add regularization term
if check_regul or regul < 0:
mev = eigh(C, eigvals_only=True, eigvals=(d - 1, d - 1))[0]
if regul < 0:
used_regul = mev * 0.5
else:
assert regul < mev, "Too high regularization parameter"
used_regul = regul
else:
used_regul = regul
C += used_regul * np.eye(d)
# compute the distance
if mntype == 0:
# get the squared Frobenius norm of the normalized difference between auto-covariances
nacv21 = solve(C, acv21, sym_pos=True, overwrite_a=True, overwrite_b=True)
dnhsac = np.core.add.reduce((nacv21 * nacv21).ravel()) # from numpy.linalg.norm
elif mntype == 1:
# get largest eigenvalue of the normalized difference between auto-covariances
dnhsac = eigh(acv21, C, eigvals_only=True, eigvals=(d - 1, d - 1))[0]
else:
raise ValueError("Invalid matrix norm type ({})".format(mntype))
return dnhsac
def linear_diff_autocor(bcsc1, bcsc2, tau=1, regul=1e-3, mntype=0):
""" Distance between the repsective auto-correlation matrices of two time-series
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
tau: int, optional, default: 1
lag parameter
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
if < 0, then set regul to be 50% of the largest singular value
mntype: int (default: 0), determines matrix norm used:
0: Frobenius (HS) norm
1: largest eigen-value
Returns
-------
daco: double,
distance between the auto-correlation matrices.
Notes
-----
With Frobenius, this is equivalent to the DACO distance with a linear kernel.
"""
d, T = bcsc1.shape # d: number of vars, T: number of observations
# autocorrelations
acr21 = compute_autocorr(bcsc2, tau=tau, regul=regul)
acr21 -= compute_autocorr(bcsc1, tau=tau, regul=regul)
# compute the distance
if mntype == 0:
# get the squared Frobenius norm of the difference between auto-correlations
daco = np.core.add.reduce((acr21 * acr21).ravel()) # from numpy.linalg.norm
elif mntype == 1:
# get the largest eigenvalue of the difference between auto-correlations
daco = eigh(acr21, eigvals_only=True, eigvals=(d - 1, d - 1))[0]
else:
raise ValueError("Invalid matrix norm type ({})".format(mntype))
return daco
def linear_crosscor(bcsc1, bcsc2, regul=1e-3, check_regul=False, mntype=0):
""" Return the cross-correlation between time-series obtained by (linear) CCA
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
if < 0, then set regul to be 50% of the largest singular value
check_regul: boolean, optional, default: False,
if True, then check that the regul parameter is smaller
than the largest eigen-values of the covariance matrices
mntype: int (default: 0), determines matrix norm used:
0: Frobenius (HS) norm
1: largest eigen-value
Returns
-------
ccsim: double,
norm of the cross-correlation matrix
Notes
-----
Not a distance but a similarity!
"""
d, T = bcsc1.shape # d: number of vars, T: number of observations
assert bcsc2.shape[1] == T, "Series must be of same duration"
# full covariance matrix
C = np.cov(bcsc1.toarray(), bcsc2.toarray())
# add regularization terms
if check_regul or regul < 0:
mev1 = eigh(C[:d, :d], eigvals_only=True, eigvals=(d - 1, d - 1))[0]
mev2 = eigh(C[d:, d:], eigvals_only=True, eigvals=(d - 1, d - 1))[0]
#print " mev1=%f, mev2=%f" % (mev1, mev2) # DEBUG
if regul < 0:
used_regul = min(mev1, mev2) * 0.5
else:
assert regul < mev1 and regul < mev2, "Too high regularization parameter"
used_regul = regul
else:
used_regul = regul
C[:d, :d] += used_regul * np.eye(d)
C[d:, d:] += used_regul * np.eye(d)
# build generalized eigenvalue problem A v = w B v
A = C.copy()
A[:d, :d] = 0.0
A[d:, d:] = 0.0
B = C # .copy()
B[:d, d:] = 0.0
B[d:, :d] = 0.0
# compute the similarity
if mntype == 0:
# get the squared Frobenius norm (trace)
BinvA = solve(B, A)
ccsim = np.core.add.reduce((BinvA * BinvA).ravel()) # from numpy.linalg.norm
elif mntype == 1:
# get largest eigenvalue of generalized eigenvalue problem (assumes B is p.d.)
ccsim = eigh(A, B, eigvals_only=True, eigvals=(2 * d - 1, 2 * d - 1))[0]
elif mntype == 2:
# sum of the largest eigenvalues of generalized eigenvalue problem (assumes B is p.d.)
ccsim = np.sum(eigh(A, B, eigvals_only=True))
else:
raise ValueError("Invalid matrix norm type ({})".format(mntype))
return ccsim
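# Illustrative note (not part of the original module): the generalized
# eigenvalue problem assembled above, A v = w B v with
#     A = [[0, C12], [C21, 0]],   B = [[C11, 0], [0, C22]],
# is the standard formulation of linear CCA; its eigenvalues come in +/- pairs
# whose magnitudes are the canonical correlations, so mntype=1 returns the
# largest canonical correlation between the two series.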
def minus_logGAK(bcsc1, bcsc2, regul=1e0, tau=0):
""" Return minus the normalized log Global Alignment kernel between series
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
tau: int, optional, default: 0,
'triangular' parameter of logGAK
regul: float, optional, default: 1e0,
'sigma' parameter of logGAK
Returns
-------
mlga: double,
minus the normalized log Global Alignment score.
Note
----
This is actually a non-linear kernel, but this function has the same
signature as linear distances.
"""
mlga = tga_dissimilarity(bcsc1.T.toarray(), bcsc2.T.toarray(), regul, tau)
return mlga
def linear_autocov_likelihood_ratio(bcsc1, bcsc2, tau=1):
""" P-value of statistical test where H0 is auto-covariance equality
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
tau: int, optional, default: 1
lag parameter
Returns
-------
daclr: double,
p-value of the statistical test where H0: auto-covs are equal
Notes
-----
Not a distance but a similarity: if it is low, then we can reject the null
hypothesis, i.e. the auto-covariances are different.
This is not exactly correct: we base this similarity on the statistical
test for the equality of covariance (not auto-covariance) matrices under
normality assumptions (i.e. the column vectors are drawn from a Gaussian
distribution).
"""
d = bcsc1.shape[0] # d: number of vars
T1 = bcsc1.shape[1]
T2 = bcsc2.shape[1]
T = T1 + T2
# compute the log det of the autocovariances
A1 = compute_autocov(bcsc1, tau=tau)
det1 = slogdet(A1)[1]
A2 = compute_autocov(bcsc2, tau=tau)
det2 = slogdet(A2)[1]
A = 1. / T * (T1 * A1 + T2 * A2) # mean of the auto-covariances
daclr = max(0.0, T * slogdet(A)[1] - T1 * det1 - T2 * det2)
# Note: threshold is just for numerical issues (very low and small value)
return daclr
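# Illustrative note (not part of the original module): the statistic above is
# a Box M-style log likelihood ratio for covariance equality,
#     lambda = T*log|A| - T1*log|A1| - T2*log|A2|,   A = (T1*A1 + T2*A2) / T,
# applied to lag-tau auto-covariances; concavity of log-det guarantees it is
# non-negative, so the max(0.0, ...) clamp only guards against numerical error.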
# XXX not designed for auto-co{r,v} and sensitive to departure from normality
def linear_autocor_likelihood_ratio(bcsc1, bcsc2, tau=1, regul=1e-3):
""" P-value of statistical test where H0 is auto-correlation equality
Parameters
----------
bcsc1: sparse.csc_matrix object,
contains the sparse column wise representation of time series 1
bcsc2: sparse.csc_matrix object,
contains the sparse column wise representation of time series 2
tau: int, optional, default: 1
lag parameter
regul: float, optional, default: 1e-3
regularization parameter for the inverse computation
if < 0, then set regul to be 50% of the largest singular value
Returns
-------
darlr: double,
p-value of the statistical test where H0: auto-cors are equal
Notes
-----
Not a distance but a similarity: if it is low, then we can reject the null
hypothesis, i.e. the auto-correlations are different.
This is not exactly correct: we base this similarity on the statistical
test for the equality of covariance (not auto-correlation) matrices under
normality assumptions (i.e. the column vectors are drawn from a Gaussian
distribution).
"""
d = bcsc1.shape[0] # d: number of vars
T1 = bcsc1.shape[1]
T2 = bcsc2.shape[1]
T = T1 + T2
# compute the log det of the autocorrelations
A1 = compute_autocorr(bcsc1, tau=tau, regul=regul)
det1 = slogdet(A1)[1]
A2 = compute_autocorr(bcsc2, tau=tau, regul=regul)
det2 = slogdet(A2)[1]
A = 1. / T * (T1 * A1 + T2 * A2) # mean of the auto-correlations
darlr = max(0.0, T * slogdet(A)[1] - T1 * det1 - T2 * det2)
# Note: threshold is just for numerical issues (very low and small value)
return darlr
|
daien/daco
|
distances_linear.py
|
Python
|
mit
| 14,811
|
[
"Gaussian"
] |
735660e1583f08442b0040156c977b2db21b81df8f9206a7a00312fd87af8dae
|
# Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools.toc.tokrules import tokens
def p_tocfile(t):
'''tocfile : headers tracks'''
from audiotools.toc import TOCFile
args = dict(t[1])
args["tracks"] = t[2]
t[0] = TOCFile(**args)
def p_headers(t):
'''headers : header
| headers header'''
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = t[1] + [t[2]]
def p_header(t):
'''header : CD_DA
| CD_ROM
| CD_ROM_XA
| CATALOG STRING
| header_cd_text'''
if t[1] in ["CD_DA", "CD_ROM", "CD_ROM_XA"]:
t[0] = ("type", t[1])
elif t[1] == "CATALOG":
t[0] = ("catalog", t[2])
else:
t[0] = ("cd_text", t[1])
def p_header_cd_text(t):
'''header_cd_text : CD_TEXT START_BLOCK language_map language_blocks END_BLOCK'''
from audiotools.toc import CDText
t[0] = CDText(languages=t[4], language_map=t[3])
def p_language_map(t):
'''language_map : LANGUAGE_MAP START_BLOCK language_mappings END_BLOCK'''
from audiotools.toc import CDTextLanguageMap
t[0] = CDTextLanguageMap(t[3])
def p_language_mappings(t):
'''language_mappings : language_mapping
| language_mappings language_mapping'''
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = t[1] + [t[2]]
def p_language_mapping(t):
'''language_mapping : NUMBER COLON language'''
t[0] = (t[1], t[3])
def p_language(t):
'''language : EN
| NUMBER'''
# FIXME - find list of supported languages
t[0] = t[1]
def p_language_blocks(t):
'''language_blocks : language_block
| language_blocks language_block'''
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = t[1] + [t[2]]
def p_language_block(t):
'''language_block : LANGUAGE NUMBER START_BLOCK cd_text_items END_BLOCK'''
from audiotools.toc import CDTextLanguage
t[0] = CDTextLanguage(language_id=t[2], text_pairs=t[4])
def p_cd_text_items(t):
'''cd_text_items : cd_text_item
| cd_text_items cd_text_item'''
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = t[1] + [t[2]]
def p_cd_text_item(t):
'''cd_text_item : TITLE STRING
| PERFORMER STRING
| SONGWRITER STRING
| COMPOSER STRING
| ARRANGER STRING
| MESSAGE STRING
| DISC_ID STRING
| GENRE STRING
| TOC_INFO1 binary
| TOC_INFO2 binary
| UPC_EAN STRING
| ISRC STRING
| SIZE_INFO binary'''
t[0] = (t[1], t[2])
def p_binary(t):
'''binary : START_BLOCK bytes END_BLOCK'''
t[0] = "".join(map(chr, t[2]))
def p_bytes(t):
'''bytes : NUMBER
| NUMBER COMMA bytes'''
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = [t[1]] + t[3]
def p_tracks(t):
'''tracks : track
| tracks track'''
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = t[1] + [t[2]]
def p_track(t):
'''track : TRACK track_mode track_flags
| TRACK track_mode sub_channel_mode track_flags'''
from audiotools.toc import TOCTrack
if len(t) == 4:
t[0] = TOCTrack(mode=t[2], flags=t[3])
else:
t[0] = TOCTrack(mode=t[2], flags=t[4], sub_channel_mode=t[3])
def p_track_mode(t):
'''track_mode : AUDIO
| MODE1
| MODE1_RAW
| MODE2
| MODE2_FORM1
| MODE2_FORM2
| MODE2_FORM_MIX
| MODE2_RAW'''
t[0] = t[1]
def p_sub_channel_mode(t):
'''sub_channel_mode : RW
| RW_RAW'''
t[0] = t[1]
def p_track_flags(t):
'''track_flags : track_flag
| track_flags track_flag'''
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = t[1] + [t[2]]
def p_track_flag(t):
'''track_flag : SILENCE length
| ZERO length
| DATAFILE STRING
| DATAFILE STRING length
| FIFO STRING length
| PREGAP TIMESTAMP'''
# FIXME - handle remaining flags
raise NotImplementedError()
def p_track_cd_text(t):
"track_flag : CD_TEXT START_BLOCK language_blocks END_BLOCK"
from audiotools.toc import CDText
t[0] = CDText(languages=t[3])
def p_track_flag_copy(t):
"track_flag : COPY"
from audiotools.toc import TOCFlag_COPY
t[0] = TOCFlag_COPY(True)
def p_track_flag_no_copy(t):
"track_flag : NO COPY"
from audiotools.toc import TOCFlag_COPY
t[0] = TOCFlag_COPY(False)
def p_track_flag_pre_emphasis(t):
"track_flag : PRE_EMPHASIS"
from audiotools.toc import TOCFlag_PRE_EMPHASIS
t[0] = TOCFlag_PRE_EMPHASIS(True)
def p_track_flag_no_pre_emphasis(t):
"track_flag : NO PRE_EMPHASIS"
from audiotools.toc import TOCFlag_PRE_EMPHASIS
t[0] = TOCFlag_PRE_EMPHASIS(False)
def p_track_flag_two_channels(t):
"track_flag : TWO_CHANNEL_AUDIO"
from audiotools.toc import TOCFlag_CHANNELS
t[0] = TOCFlag_CHANNELS(2)
def p_track_flag_four_channels(t):
"track_flag : FOUR_CHANNEL_AUDIO"
from audiotools.toc import TOCFlag_CHANNELS
t[0] = TOCFlag_CHANNELS(4)
def p_track_flag_isrc(t):
"track_flag : ISRC STRING"
from audiotools.toc import TOCFlag_ISRC
t[0] = TOCFlag_ISRC(t[2])
def p_track_file(t):
'''track_flag : FILE STRING start
| AUDIOFILE STRING start
| FILE STRING start length
| AUDIOFILE STRING start length'''
from audiotools.toc import TOCFlag_FILE
if len(t) == 4:
t[0] = TOCFlag_FILE(type=t[1],
filename=t[2],
start=t[3])
else:
t[0] = TOCFlag_FILE(type=t[1],
filename=t[2],
start=t[3],
length=t[4])
def p_track_start(t):
'''track_flag : START
| START TIMESTAMP'''
from audiotools.toc import TOCFlag_START
if len(t) == 2:
t[0] = TOCFlag_START()
else:
from fractions import Fraction
t[0] = TOCFlag_START(Fraction(t[2], 75))
def p_track_index(t):
"track_flag : INDEX TIMESTAMP"
from audiotools.toc import TOCFlag_INDEX
from fractions import Fraction
t[0] = TOCFlag_INDEX(Fraction(t[2], 75))
def p_start_number(t):
"start : NUMBER"
from fractions import Fraction
t[0] = Fraction(t[1], 44100)
def p_start_timestamp(t):
"start : TIMESTAMP"
from fractions import Fraction
t[0] = Fraction(t[1], 75)
def p_length_number(t):
"length : NUMBER"
from fractions import Fraction
t[0] = Fraction(t[1], 44100)
def p_length_timestamp(t):
"length : TIMESTAMP"
from fractions import Fraction
t[0] = Fraction(t[1], 75)
def p_error(t):
from audiotools.text import ERR_CUE_SYNTAX_ERROR
raise ValueError(ERR_CUE_SYNTAX_ERROR.format(t.lexer.lineno))
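# --- Illustrative usage sketch (not part of the original module) ---
# The p_* functions above follow PLY (python-lex-yacc) conventions: each
# docstring is a grammar production and t[i] holds the value of the i-th
# symbol. A hypothetical driver (the lexer object name is an assumption):
#
#     import ply.yacc as yacc
#     parser = yacc.yacc()                     # builds tables from the p_* rules
#     tocfile = parser.parse(open("disc.toc").read(),
#                            lexer=toc_lexer)  # hypothetical lexer instance
#
# The Fraction denominators used above encode CD conventions: TIMESTAMP values
# count 1/75-second frames, while bare NUMBERs count 44.1 kHz samples.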
|
tuffy/python-audio-tools
|
audiotools/toc/yaccrules.py
|
Python
|
gpl-2.0
| 7,986
|
[
"Brian"
] |
4aec5af637c6704048e3ae3d19c751d6d3408b517d2337609e7308fd181d94c6
|
import operator
import numpy as np
from time import time
from abc import ABCMeta, abstractmethod
import logging
from dipy.segment.metric import Metric
from dipy.segment.metric import ResampleFeature
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.segment.metric import MinimumAverageDirectFlipMetric
from dipy.tracking.streamline import set_number_of_points, nbytes
logger = logging.getLogger(__name__)
class Identity:
""" Provides identity indexing functionality.
This can replace any class supporting indexing used for referencing
(e.g. list, tuple). Indexing an instance of this class will return the
index provided instead of the element. It does not support slicing.
"""
def __getitem__(self, idx):
return idx
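# Illustrative sketch (not part of the original module): Identity just echoes
# the index back, so clusters without refdata return raw indices.
#
#     Identity()[5]    # -> 5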
class Cluster(object):
""" Provides functionalities for interacting with a cluster.
Useful container to retrieve index of elements grouped together. If
a reference to the data is provided to `cluster_map`, elements will
be returned instead of their index when possible.
Parameters
----------
cluster_map : `ClusterMap` object
Reference to the set of clusters this cluster is being part of.
id : int
Id of this cluster in its associated `cluster_map` object.
refdata : list (optional)
Actual elements that clustered indices refer to.
Notes
-----
A cluster does not contain actual data but instead knows how to
retrieve them using its `ClusterMap` object.
"""
def __init__(self, id=0, indices=None, refdata=Identity()):
self.id = id
self.refdata = refdata
self.indices = indices if indices is not None else []
def __len__(self):
return len(self.indices)
def __getitem__(self, idx):
""" Gets element(s) through indexing.
If a reference to the data was provided (via refdata property)
elements will be returned instead of their index.
Parameters
----------
idx : int, slice or list
Index of the element(s) to get.
Returns
-------
`Cluster` object(s)
When `idx` is an int, returns a single element.
When `idx` is either a slice or a list, returns a list of elements.
"""
if isinstance(idx, int) or isinstance(idx, np.integer):
return self.refdata[self.indices[idx]]
elif type(idx) is slice:
return [self.refdata[i] for i in self.indices[idx]]
elif type(idx) is list:
return [self[i] for i in idx]
msg = "Index must be a int or a slice! Not '{0}'".format(type(idx))
raise TypeError(msg)
def __iter__(self):
return (self[i] for i in range(len(self)))
def __str__(self):
return "[" + ", ".join(map(str, self.indices)) + "]"
def __repr__(self):
return "Cluster(" + str(self) + ")"
def __eq__(self, other):
return isinstance(other, Cluster) and self.indices == other.indices
def __ne__(self, other):
return not self == other
def __cmp__(self, other):
raise TypeError("Cannot compare Cluster objects.")
def assign(self, *indices):
""" Assigns indices to this cluster.
Parameters
----------
*indices : list of indices
Indices to add to this cluster.
"""
self.indices += indices
class ClusterCentroid(Cluster):
""" Provides functionalities for interacting with a cluster.
Useful container to retrieve the indices of elements grouped together and
the cluster's centroid. If a reference to the data is provided to
`cluster_map`, elements will be returned instead of their index when
possible.
Parameters
----------
cluster_map : `ClusterMapCentroid` object
Reference to the set of clusters this cluster is being part of.
id : int
Id of this cluster in its associated `cluster_map` object.
refdata : list (optional)
Actual elements that clustered indices refer to.
Notes
-----
A cluster does not contain actual data but instead knows how to
retrieve them using its `ClusterMapCentroid` object.
"""
def __init__(self, centroid, id=0, indices=None, refdata=Identity()):
super(ClusterCentroid, self).__init__(id, indices, refdata)
self.centroid = centroid.copy()
self.new_centroid = centroid.copy()
def __eq__(self, other):
return (isinstance(other, ClusterCentroid) and
np.all(self.centroid == other.centroid) and
super(ClusterCentroid, self).__eq__(other))
def assign(self, id_datum, features):
""" Assigns a data point to this cluster.
Parameters
----------
id_datum : int
Index of the data point to add to this cluster.
features : 2D array
Data point's features to modify this cluster's centroid.
"""
N = len(self)
self.new_centroid = ((self.new_centroid * N) + features) / (N+1.)
super(ClusterCentroid, self).assign(id_datum)
def update(self):
""" Update centroid of this cluster.
Returns
-------
converged : bool
Tells if the centroid has moved.
"""
converged = np.equal(self.centroid, self.new_centroid)
self.centroid = self.new_centroid.copy()
return converged
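# Illustrative sketch (not part of the original module): assign() maintains a
# running mean, so after N assignments the pending centroid is the average of
# the assigned features; update() then commits it.
#
#     c = ClusterCentroid(centroid=np.zeros(3))
#     c.assign(0, np.array([1., 0., 0.]))
#     c.assign(1, np.array([0., 1., 0.]))
#     c.update()       # centroid becomes [0.5, 0.5, 0.]; returns an
#                      # elementwise comparison with the previous centroid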
class ClusterMap(object):
""" Provides functionalities for interacting with clustering outputs.
Useful container to create, remove, retrieve and filter clusters.
If `refdata` is given, elements will be returned instead of their
index when using `Cluster` objects.
Parameters
----------
refdata : list
Actual elements that clustered indices refer to.
"""
def __init__(self, refdata=Identity()):
self._clusters = []
self.refdata = refdata
@property
def clusters(self):
return self._clusters
@property
def refdata(self):
return self._refdata
@refdata.setter
def refdata(self, value):
if value is None:
value = Identity()
self._refdata = value
for cluster in self.clusters:
cluster.refdata = self._refdata
def __len__(self):
return len(self.clusters)
def __getitem__(self, idx):
""" Gets cluster(s) through indexing.
Parameters
----------
idx : int, slice, list or boolean array
Index of the element(s) to get.
Returns
-------
`Cluster` object(s)
When `idx` is an int, returns a single `Cluster` object.
When `idx` is either a slice, list or boolean array, returns
a list of `Cluster` objects.
"""
if isinstance(idx, np.ndarray) and idx.dtype == bool:
return [self.clusters[i]
for i, take_it in enumerate(idx) if take_it]
elif type(idx) is slice:
return [self.clusters[i] for i in range(*idx.indices(len(self)))]
elif type(idx) is list:
return [self.clusters[i] for i in idx]
return self.clusters[idx]
def __iter__(self):
return iter(self.clusters)
def __str__(self):
return "[" + ", ".join(map(str, self)) + "]"
def __repr__(self):
return "ClusterMap(" + str(self) + ")"
def _richcmp(self, other, op):
""" Compares this cluster map with another cluster map or an integer.
Two `ClusterMap` objects are equal if they contain the same clusters.
When comparing a `ClusterMap` object with an integer, the comparison
will be performed on the size of the clusters instead.
Parameters
----------
other : `ClusterMap` object or int
Object to compare to.
op : rich comparison operators (see module `operator`)
Valid operators are: lt, le, eq, ne, gt or ge.
Returns
-------
bool or 1D array (bool)
When comparing to another `ClusterMap` object, it returns whether
the two `ClusterMap` objects contain the same clusters or not.
When comparing to an integer the comparison is performed on the
clusters sizes, it returns an array of boolean.
"""
if isinstance(other, ClusterMap):
if op is operator.eq:
return isinstance(other, ClusterMap) \
and len(self) == len(other) \
and self.clusters == other.clusters
elif op is operator.ne:
return not self == other
raise NotImplementedError(
"Can only check if two ClusterMap instances are equal or not.")
elif isinstance(other, int):
return np.array([op(len(cluster), other) for cluster in self])
msg = ("ClusterMap only supports comparison with a int or another"
" instance of Clustermap.")
raise NotImplementedError(msg)
def __eq__(self, other):
return self._richcmp(other, operator.eq)
def __ne__(self, other):
return self._richcmp(other, operator.ne)
def __lt__(self, other):
return self._richcmp(other, operator.lt)
def __le__(self, other):
return self._richcmp(other, operator.le)
def __gt__(self, other):
return self._richcmp(other, operator.gt)
def __ge__(self, other):
return self._richcmp(other, operator.ge)
def add_cluster(self, *clusters):
""" Adds one or multiple clusters to this cluster map.
Parameters
----------
*clusters : `Cluster` object, ...
Cluster(s) to be added in this cluster map.
"""
for cluster in clusters:
self.clusters.append(cluster)
cluster.refdata = self.refdata
def remove_cluster(self, *clusters):
""" Remove one or multiple clusters from this cluster map.
Parameters
----------
*clusters : `Cluster` object, ...
Cluster(s) to be removed from this cluster map.
"""
for cluster in clusters:
self.clusters.remove(cluster)
def clear(self):
""" Remove all clusters from this cluster map. """
del self.clusters[:]
def size(self):
""" Gets number of clusters contained in this cluster map. """
return len(self)
def clusters_sizes(self):
""" Gets the size of every cluster contained in this cluster map.
Returns
-------
list of int
Sizes of every cluster in this cluster map.
"""
return list(map(len, self))
def get_large_clusters(self, min_size):
""" Gets clusters which contains at least `min_size` elements.
Parameters
----------
min_size : int
Minimum number of elements a cluster needs to have to be selected.
Returns
-------
list of `Cluster` objects
Clusters having at least `min_size` elements.
"""
return self[self >= min_size]
def get_small_clusters(self, max_size):
""" Gets clusters which contains at most `max_size` elements.
Parameters
----------
max_size : int
Maximum number of elements a cluster can have to be selected.
Returns
-------
list of `Cluster` objects
Clusters having at most `max_size` elements.
"""
return self[self <= max_size]
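# Illustrative sketch (not part of the original module): comparing a
# ClusterMap with an int compares cluster sizes and yields a boolean mask
# that __getitem__ accepts, which is exactly how the two filters above work.
#
#     cm = ClusterMap()
#     cm.add_cluster(Cluster(indices=[0, 1, 2]), Cluster(indices=[3]))
#     cm >= 2                   # -> array([ True, False])
#     cm.get_large_clusters(2)  # -> [Cluster([0, 1, 2])]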
class ClusterMapCentroid(ClusterMap):
""" Provides functionalities for interacting with clustering outputs
that have centroids.
Allows easy retrieval of the centroid of every cluster. Also, it is
a useful container to create, remove, retrieve and filter clusters.
If `refdata` is given, elements will be returned instead of their
index when using `ClusterCentroid` objects.
Parameters
----------
refdata : list
Actual elements that clustered indices refer to.
"""
@property
def centroids(self):
return [cluster.centroid for cluster in self.clusters]
class Clustering(object):
__metaclass__ = ABCMeta
@abstractmethod
def cluster(self, data, ordering=None):
""" Clusters `data`.
Subclasses will perform their clustering algorithm here.
Parameters
----------
data : list of N-dimensional arrays
Each array represents a data point.
ordering : iterable of indices, optional
Specifies the order in which data points will be clustered.
Returns
-------
`ClusterMap` object
Result of the clustering.
"""
msg = "Subclass has to define method 'cluster(data, ordering)'!"
raise NotImplementedError(msg)
class QuickBundles(Clustering):
r""" Clusters streamlines using QuickBundles [Garyfallidis12]_.
Given a list of streamlines, the QuickBundles algorithm sequentially
assigns each streamline to its closest bundle in $\mathcal{O}(Nk)$ where
$N$ is the number of streamlines and $k$ is the final number of bundles.
If for a given streamline its closest bundle is farther than `threshold`,
a new bundle is created and the streamline is assigned to it except if the
number of bundles has already exceeded `max_nb_clusters`.
Parameters
----------
threshold : float
The maximum distance from a bundle for a streamline to still be
considered part of it.
metric : str or `Metric` object (optional)
The distance metric to use when comparing two streamlines. By default,
the Minimum average Direct-Flip (MDF) distance [Garyfallidis12]_ is
used and streamlines are automatically resampled so they have
12 points.
max_nb_clusters : int
Limits the creation of bundles.
Examples
--------
>>> from dipy.segment.clustering import QuickBundles
>>> from dipy.data import get_fnames
>>> from dipy.io.streamline import load_tractogram
>>> from dipy.tracking.streamline import Streamlines
>>> fname = get_fnames('fornix')
>>> fornix = load_tractogram(fname, 'same',
... bbox_valid_check=False).streamlines
>>> streamlines = Streamlines(fornix)
>>> # Segment fornix with a threshold of 10mm and streamlines resampled
>>> # to 12 points.
>>> qb = QuickBundles(threshold=10.)
>>> clusters = qb.cluster(streamlines)
>>> len(clusters)
4
>>> list(map(len, clusters))
[61, 191, 47, 1]
>>> # Resampling streamlines differently is done explicitly as follows.
>>> # Note this has an impact on the speed and the accuracy (tradeoff).
>>> from dipy.segment.metric import ResampleFeature
>>> from dipy.segment.metric import AveragePointwiseEuclideanMetric
>>> feature = ResampleFeature(nb_points=2)
>>> metric = AveragePointwiseEuclideanMetric(feature)
>>> qb = QuickBundles(threshold=10., metric=metric)
>>> clusters = qb.cluster(streamlines)
>>> len(clusters)
4
>>> list(map(len, clusters))
[58, 142, 72, 28]
References
----------
.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
tractography simplification, Frontiers in Neuroscience,
vol 6, no 175, 2012.
"""
def __init__(self, threshold, metric="MDF_12points",
max_nb_clusters=np.iinfo('i4').max):
self.threshold = threshold
self.max_nb_clusters = max_nb_clusters
if isinstance(metric, MinimumAverageDirectFlipMetric):
raise ValueError("Use AveragePointwiseEuclideanMetric instead")
if isinstance(metric, Metric):
self.metric = metric
elif metric == "MDF_12points":
feature = ResampleFeature(nb_points=12)
self.metric = AveragePointwiseEuclideanMetric(feature)
else:
raise ValueError("Unknown metric: {0}".format(metric))
def cluster(self, streamlines, ordering=None):
""" Clusters `streamlines` into bundles.
Performs the QuickBundles algorithm using the predefined metric and threshold.
Parameters
----------
streamlines : list of 2D arrays
Each 2D array represents a sequence of 3D points (points, 3).
ordering : iterable of indices
Specifies the order in which data points will be clustered.
Returns
-------
`ClusterMapCentroid` object
Result of the clustering.
"""
from dipy.segment.clustering_algorithms import quickbundles
cluster_map = quickbundles(streamlines, self.metric,
threshold=self.threshold,
max_nb_clusters=self.max_nb_clusters,
ordering=ordering)
cluster_map.refdata = streamlines
return cluster_map
class QuickBundlesX(Clustering):
r""" Clusters streamlines using QuickBundlesX.
Parameters
----------
thresholds : list of float
Thresholds to use for each clustering layer. A threshold represents the
maximum distance from a cluster for a streamline to still be considered
part of it.
metric : str or `Metric` object (optional)
The distance metric to use when comparing two streamlines. By default,
the Minimum average Direct-Flip (MDF) distance [Garyfallidis12]_ is
used and streamlines are automatically resampled so they have 12
points.
References
----------
.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
tractography simplification, Frontiers in Neuroscience,
vol 6, no 175, 2012.
.. [Garyfallidis16] Garyfallidis E. et al. QuickBundlesX: Sequential
clustering of millions of streamlines in multiple
levels of detail at record execution time. Proceedings
of the International Society of Magnetic Resonance
in Medicine (ISMRM). Singapore, 4187, 2016.
"""
def __init__(self, thresholds, metric="MDF_12points"):
self.thresholds = thresholds
if isinstance(metric, MinimumAverageDirectFlipMetric):
raise ValueError("Use AveragePointwiseEuclideanMetric instead")
if isinstance(metric, Metric):
self.metric = metric
elif metric == "MDF_12points":
feature = ResampleFeature(nb_points=12)
self.metric = AveragePointwiseEuclideanMetric(feature)
else:
raise ValueError("Unknown metric: {0}".format(metric))
def cluster(self, streamlines, ordering=None):
""" Clusters `streamlines` into bundles.
Performs the QuickBundlesX algorithm using a predefined metric and thresholds.
Parameters
----------
streamlines : list of 2D arrays
Each 2D array represents a sequence of 3D points (points, 3).
ordering : iterable of indices
Specifies the order in which data points will be clustered.
Returns
-------
`TreeClusterMap` object
Result of the clustering.
"""
from dipy.segment.clustering_algorithms import quickbundlesx
tree = quickbundlesx(streamlines, self.metric,
thresholds=self.thresholds,
ordering=ordering)
tree.refdata = streamlines
return tree
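# Editor's sketch (not part of dipy's API): hierarchical clustering with
# QuickBundlesX using coarse-to-fine thresholds. The threshold values and
# the helper name are assumptions chosen for illustration; get_clusters(i)
# returns the clusters of layer i, with layer 0 being the root.
def _example_quickbundlesx(streamlines):
    """Illustrative only: cluster `streamlines` at three levels of detail."""
    qbx = QuickBundlesX(thresholds=[40., 20., 10.])  # mm, coarse to fine
    tree = qbx.cluster(streamlines)
    coarse = tree.get_clusters(1)  # bundles at the 40 mm layer
    fine = tree.get_clusters(3)    # bundles at the 10 mm layer
    return coarse, fine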
class TreeCluster(ClusterCentroid):
def __init__(self, threshold, centroid, indices=None):
super(TreeCluster, self).__init__(centroid=centroid, indices=indices)
self.threshold = threshold
self.parent = None
self.children = []
def add(self, child):
child.parent = self
self.children.append(child)
@property
def is_leaf(self):
return len(self.children) == 0
def return_indices(self):
return self.children
class TreeClusterMap(ClusterMap):
def __init__(self, root):
self.root = root
self.leaves = []
def _retrieves_leaves(node):
if node.is_leaf:
self.leaves.append(node)
self.traverse_postorder(self.root, _retrieves_leaves)
@property
def refdata(self):
return self._refdata
@refdata.setter
def refdata(self, value):
if value is None:
value = Identity()
self._refdata = value
def _set_refdata(node):
node.refdata = self._refdata
self.traverse_postorder(self.root, _set_refdata)
def traverse_postorder(self, node, visit):
for child in node.children:
self.traverse_postorder(child, visit)
visit(node)
def iter_preorder(self, node):
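# Iterative preorder traversal: yield the current node, descend into its
# first child, and stack the remaining children so each sibling subtree is
# visited after the current one is exhausted.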
parent_stack = []
while len(parent_stack) > 0 or node is not None:
if node is not None:
yield node
if len(node.children) > 0:
parent_stack += node.children[1:]
node = node.children[0]
else:
node = None
else:
node = parent_stack.pop()
def __iter__(self):
return self.iter_preorder(self.root)
def get_clusters(self, wanted_level):
clusters = ClusterMapCentroid()
def _traverse(node, level=0):
if level == wanted_level:
clusters.add_cluster(node)
return
for child in node.children:
_traverse(child, level + 1)
_traverse(self.root)
return clusters
def qbx_and_merge(streamlines, thresholds,
nb_pts=20, select_randomly=None, rng=None, verbose=False):
""" Run QuickBundlesX and then run again on the centroids of the last layer
Running again QuickBundles at a layer has the effect of merging
some of the clusters that maybe originally devided because of branching.
This function help obtain a result at a QuickBundles quality but with
QuickBundlesX speed. The merging phase has low cost because it is applied
only on the centroids rather than the entire dataset.
Parameters
----------
streamlines : Streamlines
thresholds : sequence
List of distance thresholds for QuickBundlesX.
nb_pts : int
Number of points for discretizing each streamline
select_randomly : int
Randomly select a specific number of streamlines. If None all the
streamlines are used.
rng : RandomState
If None then RandomState is initialized internally.
verbose : bool, optional.
If True, log information. Default False.
Returns
-------
clusters : obj
Contains the clusters of the last layer of QuickBundlesX after merging.
References
----------
.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
tractography simplification, Frontiers in Neuroscience,
vol 6, no 175, 2012.
.. [Garyfallidis16] Garyfallidis E. et al. QuickBundlesX: Sequential
clustering of millions of streamlines in multiple
levels of detail at record execution time. Proceedings
of the International Society of Magnetic Resonance
in Medicine (ISMRM). Singapore, 4187, 2016.
"""
t = time()
len_s = len(streamlines)
if select_randomly is None:
select_randomly = len_s
if rng is None:
rng = np.random.RandomState()
indices = rng.choice(len_s, min(select_randomly, len_s),
replace=False)
sample_streamlines = set_number_of_points(streamlines, nb_pts)
if verbose:
logger.info(' Resampled to {} points'.format(nb_pts))
logger.info(' Size is %0.3f MB' % (nbytes(sample_streamlines),))
logger.info(' Duration of resampling is %0.3f sec.' % (time() - t,))
logger.info(' QBX phase starting...')
qbx = QuickBundlesX(thresholds,
metric=AveragePointwiseEuclideanMetric())
t1 = time()
qbx_clusters = qbx.cluster(sample_streamlines, ordering=indices)
if verbose:
logger.info(' Merging phase starting ...')
qbx_merge = QuickBundlesX([thresholds[-1]],
metric=AveragePointwiseEuclideanMetric())
final_level = len(thresholds)
len_qbx_fl = len(qbx_clusters.get_clusters(final_level))
qbx_ordering_final = rng.choice(len_qbx_fl, len_qbx_fl, replace=False)
qbx_merged_cluster_map = qbx_merge.cluster(
qbx_clusters.get_clusters(final_level).centroids,
ordering=qbx_ordering_final).get_clusters(1)
qbx_cluster_map = qbx_clusters.get_clusters(final_level)
merged_cluster_map = ClusterMapCentroid()
for cluster in qbx_merged_cluster_map:
merged_cluster = ClusterCentroid(centroid=cluster.centroid)
for i in cluster.indices:
merged_cluster.indices.extend(qbx_cluster_map[i].indices)
merged_cluster_map.add_cluster(merged_cluster)
merged_cluster_map.refdata = streamlines
if verbose:
logger.info(' QuickBundlesX time for %d random streamlines'
% (select_randomly,))
logger.info(' Duration %0.3f sec. \n' % (time() - t1,))
return merged_cluster_map
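# Editor's sketch (not part of dipy's API): a typical call to qbx_and_merge.
# The coarse-to-fine threshold values below are assumptions picked for
# illustration, and the sketch assumes `ClusterMapCentroid` exposes a
# `centroids` property as used elsewhere in this module.
def _example_qbx_and_merge(streamlines):
    """Illustrative only: merged QuickBundlesX clustering of `streamlines`."""
    thresholds = [30., 25., 20., 15.]  # mm, one per QuickBundlesX layer
    clusters = qbx_and_merge(streamlines, thresholds, nb_pts=20)
    return clusters.centroids  # one centroid per merged bundle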
|
FrancoisRheaultUS/dipy
|
dipy/segment/clustering.py
|
Python
|
bsd-3-clause
| 25,605
|
[
"VisIt"
] |
ee67874781b527946e4bd8a27caf583847f5a666e4165ecf53b00affd9d82711
|
# Copyright 2001-2009 Brad Chapman.
# Revisions copyright 2009-2016 by Peter Cock.
# Revisions copyright 2009 by David Winter.
# Revisions copyright 2009-2010 by Leighton Pritchard.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to interact with and run various EMBOSS programs.
These classes follow the AbstractCommandline interfaces for running
programs.
"""
from __future__ import print_function
from Bio.Application import _Option, _Switch, AbstractCommandline
class _EmbossMinimalCommandLine(AbstractCommandline):
"""Base Commandline object for EMBOSS wrappers (PRIVATE).
This is provided for subclassing; it deals with shared options
common to all the EMBOSS tools:
- auto Turn off prompts
- stdout Write standard output
- filter Read standard input, write standard output
- options Prompt for standard and additional values
- debug Write debug output to program.dbg
- verbose Report some/full command line options
- help Report command line options. More
information on associated and general
qualifiers can be found with -help -verbose
- warning Report warnings
- error Report errors
- fatal Report fatal errors
- die Report dying program messages
"""
def __init__(self, cmd=None, **kwargs):
assert cmd is not None
extra_parameters = [
_Switch(["-auto", "auto"],
"""Turn off prompts.
Automatic mode disables prompting, so we recommend you set
this argument all the time when calling an EMBOSS tool from
Biopython.
"""),
_Switch(["-stdout", "stdout"],
"Write standard output."),
_Switch(["-filter", "filter"],
"Read standard input, write standard output."),
_Switch(["-options", "options"],
"""Prompt for standard and additional values.
If you are calling an EMBOSS tool from within Biopython,
we DO NOT recommend using this option.
"""),
_Switch(["-debug", "debug"],
"Write debug output to program.dbg."),
_Switch(["-verbose", "verbose"],
"Report some/full command line options"),
_Switch(["-help", "help"],
"""Report command line options.
More information on associated and general qualifiers can
be found with -help -verbose
"""),
_Switch(["-warning", "warning"],
"Report warnings."),
_Switch(["-error", "error"],
"Report errors."),
_Switch(["-die", "die"],
"Report dying program messages."),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
AbstractCommandline.__init__(self, cmd, **kwargs)
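# Editor's sketch of how a concrete wrapper builds on this base class. The
# revseq tool and its -sequence/-outseq options exist in EMBOSS, but this
# minimal class is an illustration and not part of Biopython itself.
class _ExampleRevseqCommandline(_EmbossMinimalCommandLine):
    """Illustrative minimal wrapper for the EMBOSS revseq tool (EXAMPLE ONLY)."""
    def __init__(self, cmd="revseq", **kwargs):
        self.parameters = [
            _Option(["-sequence", "sequence"],
                    "Input sequence(s) filename",
                    filename=True,
                    is_required=True),
            _Option(["-outseq", "outseq"],
                    "Output sequence filename",
                    filename=True),
        ]
        _EmbossMinimalCommandLine.__init__(self, cmd, **kwargs)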
class _EmbossCommandLine(_EmbossMinimalCommandLine):
"""Base Commandline object for EMBOSS wrappers (PRIVATE).
This is provided for subclassing; it deals with shared options
common to all the EMBOSS tools plus:
- outfile Output filename
"""
def __init__(self, cmd=None, **kwargs):
assert cmd is not None
extra_parameters = [
_Option(["-outfile", "outfile"],
"Output filename",
filename=True),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
_EmbossMinimalCommandLine.__init__(self, cmd, **kwargs)
def _validate(self):
# Check the outfile, filter, or stdout option has been set.
# We can't simply do this via the required flag for the outfile
# output - this seems the simplest solution.
if not (self.outfile or self.filter or self.stdout):
raise ValueError("You must either set outfile (output filename), "
"or enable filter or stdout (output to stdout).")
return _EmbossMinimalCommandLine._validate(self)
class Primer3Commandline(_EmbossCommandLine):
"""Commandline object for the Primer3 interface from EMBOSS.
The precise set of supported arguments depends on your version of EMBOSS.
This version accepts arguments current at EMBOSS 6.1.0, but in order to
remain backwards compatible it also supports the old argument names.
e.g. Using EMBOSS 6.1.0 or later,
>>> cline = Primer3Commandline(sequence="mysequence.fas", auto=True, hybridprobe=True)
>>> cline.explainflag = True
>>> cline.osizeopt=20
>>> cline.psizeopt=200
>>> cline.outfile = "myresults.out"
>>> cline.bogusparameter = 1967 # Invalid parameter
Traceback (most recent call last):
...
ValueError: Option name bogusparameter was not found.
>>> print(cline)
eprimer3 -auto -outfile=myresults.out -sequence=mysequence.fas -hybridprobe=True -psizeopt=200 -osizeopt=20 -explainflag=True
The equivalent for anyone still using an older version of EMBOSS would be:
>>> cline = Primer3Commandline(sequence="mysequence.fas", auto=True, hybridprobe=True)
>>> cline.explainflag = True
>>> cline.oligosize=20 # Old EMBOSS, instead of osizeopt
>>> cline.productosize=200 # Old EMBOSS, instead of psizeopt
>>> cline.outfile = "myresults.out"
>>> print(cline)
eprimer3 -auto -outfile=myresults.out -sequence=mysequence.fas -hybridprobe=True -productosize=200 -oligosize=20 -explainflag=True
"""
def __init__(self, cmd="eprimer3", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence to choose primers from.",
is_required=True),
_Option(["-task", "task"],
"Tell eprimer3 what task to perform."),
_Option(["-hybridprobe", "hybridprobe"],
"Find an internal oligo to use as a hyb probe."),
_Option(["-numreturn", "numreturn"],
"Maximum number of primer pairs to return."),
_Option(["-includedregion", "includedregion"],
"Subregion of the sequence in which to pick primers."),
_Option(["-target", "target"],
"Sequence to target for flanking primers."),
_Option(["-excludedregion", "excludedregion"],
"Regions to exclude from primer picking."),
_Option(["-forwardinput", "forwardinput"],
"Sequence of a forward primer to check."),
_Option(["-reverseinput", "reverseinput"],
"Sequence of a reverse primer to check."),
_Option(["-gcclamp", "gcclamp"],
"The required number of Gs and Cs at the 3' of each primer."),
_Option(["-osize", "osize"],
"Optimum length of a primer oligo."),
_Option(["-minsize", "minsize"],
"Minimum length of a primer oligo."),
_Option(["-maxsize", "maxsize"],
"Maximum length of a primer oligo."),
_Option(["-otm", "otm"],
"""Melting temperature for primer oligo (OBSOLETE).
Option replaced in EMBOSS 6.6.0 by -opttm
"""),
_Option(["-opttm", "opttm"],
"""Optimum melting temperature for a primer oligo.
Option added in EMBOSS 6.6.0, replacing -otm
"""),
_Option(["-mintm", "mintm"],
"Minimum melting temperature for a primer oligo."),
_Option(["-maxtm", "maxtm"],
"Maximum melting temperature for a primer oligo."),
_Option(["-maxdifftm", "maxdifftm"],
"Maximum difference in melting temperatures between "
"forward and reverse primers."),
_Option(["-ogcpercent", "ogcpercent"],
"Optimum GC% for a primer."),
_Option(["-mingc", "mingc"],
"Minimum GC% for a primer."),
_Option(["-maxgc", "maxgc"],
"Maximum GC% for a primer."),
_Option(["-saltconc", "saltconc"],
"Millimolar salt concentration in the PCR."),
_Option(["-dnaconc", "dnaconc"],
"Nanomolar concentration of annealing oligos in the PCR."),
_Option(["-maxpolyx", "maxpolyx"],
"Maximum allowable mononucleotide repeat length in a primer."),
# Primer length:
_Option(["-productosize", "productosize"],
"""Optimum size for the PCR product (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -psizeopt
"""),
_Option(["-psizeopt", "psizeopt"],
"""Optimum size for the PCR product.
Option added in EMBOSS 6.1.0, replacing -productosize
"""),
_Option(["-productsizerange", "productsizerange"],
"""Acceptable range of length for the PCR product (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -prange
"""),
_Option(["-prange", "prange"],
"""Acceptable range of length for the PCR product.
Option added in EMBOSS 6.1.0, replacing -productsizerange
"""),
# Primer temperature:
_Option(["-productotm", "productotm"],
"""Optimum melting temperature for the PCR product (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ptmopt
"""),
_Option(["-ptmopt", "ptmopt"],
"""Optimum melting temperature for the PCR product.
Option added in EMBOSS 6.1.0, replacing -productotm
"""),
_Option(["-productmintm", "productmintm"],
"""Minimum allowed melting temperature for the amplicon (OBSOLETE)
Option replaced in EMBOSS 6.1.0 by -ptmmin
"""),
_Option(["-ptmmin", "ptmmin"],
"""Minimum allowed melting temperature for the amplicon."),
Option added in EMBOSS 6.1.0, replacing -productmintm
"""),
_Option(["-productmaxtm", "productmaxtm"],
"""Maximum allowed melting temperature for the amplicon (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ptmmax
"""),
_Option(["-ptmmax", "ptmmax"],
"""Maximum allowed melting temperature for the amplicon."),
Option added in EMBOSS 6.1.0, replacing -productmaxtm
"""),
# Note to self, should be -oexcludedregion not -oexcluderegion
_Option(["-oexcludedregion", "oexcludedregion"],
"""Do not pick internal oligos in this region."),
Option added in EMBOSS 6.1.0, replacing -oligoexcludedregion.
"""),
_Option(["-oligoexcludedregion", "oligoexcludedregion"],
"""Do not pick internal oligos in this region (OBSOLETE)."),
Option replaced in EMBOSS 6.1.0 by -oexcluderegion.
"""),
_Option(["-oligoinput", "oligoinput"],
"Sequence of the internal oligo."),
# Oligo length:
_Option(["-oligosize", "oligosize"],
"""Optimum length of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -osizeopt.
"""),
_Option(["-osizeopt", "osizeopt"],
"""Optimum length of internal oligo.
Option added in EMBOSS 6.1.0, replaces -oligosize
"""),
_Option(["-oligominsize", "oligominsize"],
"""Minimum length of internal oligo (OBSOLETE)."),
Option replaced in EMBOSS 6.1.0 by -ominsize.
"""),
_Option(["-ominsize", "ominsize"],
"""Minimum length of internal oligo."
Option added in EMBOSS 6.1.0, replaces -oligominsize
"""),
_Option(["-oligomaxsize", "oligomaxsize"],
"""Maximum length of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -omaxsize.
"""),
_Option(["-omaxsize", "omaxsize"],
"""Maximum length of internal oligo.
Option added in EMBOSS 6.1.0, replaces -oligomaxsize
"""),
# Oligo GC temperature:
_Option(["-oligotm", "oligotm"],
"""Optimum melting temperature of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -otmopt.
"""),
_Option(["-otmopt", "otmopt"],
"""Optimum melting temperature of internal oligo.
Option added in EMBOSS 6.1.0.
"""),
_Option(["-oligomintm", "oligomintm"],
"""Minimum melting temperature of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -otmmin.
"""),
_Option(["-otmmin", "otmmin"],
"""Minimum melting temperature of internal oligo.
Option added in EMBOSS 6.1.0, replacing -oligomintm
"""),
_Option(["-oligomaxtm", "oligomaxtm"],
"""Maximum melting temperature of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -otmmax.
"""),
_Option(["-otmmax", "otmmax"],
"""Maximum melting temperature of internal oligo.
Option added in EMBOSS 6.1.0, replacing -oligomaxtm
"""),
# Oligo GC percent:
_Option(["-oligoogcpercent", "oligoogcpercent"],
"""Optimum GC% for internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ogcopt.
"""),
_Option(["-ogcopt", "ogcopt"],
"""Optimum GC% for internal oligo."
Option added in EMBOSS 6.1.0, replacing -oligoogcpercent
"""),
_Option(["-oligomingc", "oligomingc"],
"""Minimum GC% for internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ogcmin.
"""),
_Option(["-ogcmin", "ogcmin"],
"""Minimum GC% for internal oligo.
Option added in EMBOSS 6.1.0, replacing -oligomingc
"""),
_Option(["-oligomaxgc", "oligomaxgc"],
"""Maximum GC% for internal oligo.
Option replaced in EMBOSS 6.1.0 by -ogcmax
"""),
_Option(["-ogcmax", "ogcmax"],
"""Maximum GC% for internal oligo."),
Option added in EMBOSS 6.1.0, replacing -oligomaxgc
"""),
# Oligo salt concentration:
_Option(["-oligosaltconc", "oligosaltconc"],
"""Millimolar concentration of salt in the hybridisation."),
Option replaced in EMBOSS 6.1.0 by -osaltconc
"""),
_Option(["-osaltconc", "osaltconc"],
"""Millimolar concentration of salt in the hybridisation."),
Option added in EMBOSS 6.1.0, replacing -oligosaltconc
"""),
_Option(["-oligodnaconc", "oligodnaconc"],
"""Nanomolar concentration of internal oligo in the hybridisation.
Option replaced in EMBOSS 6.1.0 by -odnaconc
"""),
_Option(["-odnaconc", "odnaconc"],
"""Nanomolar concentration of internal oligo in the hybridisation.
Option added in EMBOSS 6.1.0, replacing -oligodnaconc
"""),
# Oligo self complementarity
_Option(["-oligoselfany", "oligoselfany"],
"""Maximum allowable alignment score for self-complementarity (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -oanyself
"""),
_Option(["-oanyself", "oanyself"],
"""Maximum allowable alignment score for self-complementarity."),
Option added in EMBOSS 6.1.0, replacing -oligoselfany
"""),
_Option(["-oligoselfend", "oligoselfend"],
"""Maximum allowable 3`-anchored global alignment score "
for self-complementarity (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -oendself
"""),
_Option(["-oendself", "oendself"],
"""Max 3`-anchored self-complementarity global alignment score.
Option added in EMBOSS 6.1.0, replacing -oligoselfend
"""),
_Option(["-oligomaxpolyx", "oligomaxpolyx"],
"""Maximum length of mononucleotide repeat in internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -opolyxmax
"""),
_Option(["-opolyxmax", "opolyxmax"],
"""Maximum length of mononucleotide repeat in internal oligo."),
Option added in EMBOSS 6.1.0, replacing -oligomaxpolyx
"""),
_Option(["-mispriminglibraryfile", "mispriminglibraryfile"],
"File containing library of sequences to avoid amplifying"),
_Option(["-maxmispriming", "maxmispriming"],
"Maximum allowed similarity of primers to sequences in "
"library specified by -mispriminglibrary"),
_Option(["-oligomaxmishyb", "oligomaxmishyb"],
"""Maximum alignment score for hybridisation of internal oligo to
library specified by -oligomishyblibraryfile (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -omishybmax
"""),
_Option(["-omishybmax", "omishybmax"],
"""Maximum alignment score for hybridisation of internal oligo to
library specified by -mishyblibraryfile.
Option added in EMBOSS 6.1.0, replacing -oligomaxmishyb
"""),
_Option(["-oligomishyblibraryfile", "oligomishyblibraryfile"],
"""Library file of seqs to avoid internal oligo hybridisation (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -mishyblibraryfile
"""),
_Option(["-mishyblibraryfile", "mishyblibraryfile"],
"""Library file of seqs to avoid internal oligo hybridisation.
Option added in EMBOSS 6.1.0, replacing -oligomishyblibraryfile
"""),
_Option(["-explainflag", "explainflag"],
"Produce output tags with eprimer3 statistics"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class PrimerSearchCommandline(_EmbossCommandLine):
"""Commandline object for the primersearch program from EMBOSS."""
def __init__(self, cmd="primersearch", **kwargs):
self.parameters = [
_Option(["-seqall", "-sequences", "sequences", "seqall"],
"Sequence to look for the primer pairs in.",
is_required=True),
# When this wrapper was written primersearch used -sequences
# as the argument name. Since at least EMBOSS 5.0 (and
# perhaps earlier) this has been -seqall instead.
_Option(["-infile", "-primers", "primers", "infile"],
"File containing the primer pairs to search for.",
filename=True,
is_required=True),
# When this wrapper was written primersearch used -primers
# as the argument name. Since at least EMBOSS 5.0 (and
# perhaps earlier) this has been -infile instead.
_Option(["-mismatchpercent", "mismatchpercent"],
"Allowed percentage mismatch (any integer value, default 0).",
is_required=True),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FDNADistCommandline(_EmbossCommandLine):
"""Commandline object for the fdnadist program from EMBOSS.
fdnadist is an EMBOSS wrapper for the PHYLIP program dnadist for
calculating distance matrices from DNA sequence files.
"""
def __init__(self, cmd="fdnadist", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-method", "method"],
"sub. model [f,k,j,l,s]",
is_required=True),
_Option(["-gamma", "gamma"],
"gamma [g, i,n]"),
_Option(["-ncategories", "ncategories"],
"number of rate catergories (1-9)"),
_Option(["-rate", "rate"],
"rate for each category"),
_Option(["-categories", "categories"],
"File of substitution rate categories"),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-gammacoefficient", "gammacoefficient"],
"value for gamma (> 0.001)"),
_Option(["-invarfrac", "invarfrac"],
"proportoin of invariant sites"),
_Option(["-ttratio", "ttratio"],
"ts/tv ratio"),
_Option(["-freqsfrom", "freqsfrom"],
"use emprical base freqs"),
_Option(["-basefreq", "basefreq"],
"specify basefreqs"),
_Option(["-lower", "lower"],
"lower triangle matrix (y/N)"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FTreeDistCommandline(_EmbossCommandLine):
"""Commandline object for the ftreedist program from EMBOSS.
ftreedist is an EMBOSS wrapper for the PHYLIP program treedist used for
calculating distance measures between phylogenetic trees.
"""
def __init__(self, cmd="ftreedist", **kwargs):
self.parameters = [
_Option(["-intreefile", "intreefile"],
"tree file to score (phylip)",
filename=True,
is_required=True),
_Option(["-dtype", "dtype"],
"distance type ([S]ymetric, [b]ranch score)"),
_Option(["-pairing", "pairing"],
"tree pairing method ([A]djacent pairs, all [p]ossible pairs)"),
_Option(["-style", "style"],
"output style - [V]erbose, [f]ill, [s]parse"),
_Option(["-noroot", "noroot"],
"treat trees as rooted [N/y]"),
_Option(["-outgrno", "outgrno"],
"which taxon to root the trees with (starts from 0)"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FNeighborCommandline(_EmbossCommandLine):
"""Commandline object for the fneighbor program from EMBOSS.
fneighbor is an EMBOSS wrapper for the PHYLIP program neighbor used for
calculating neighbor-joining or UPGMA trees from distance matrices.
"""
def __init__(self, cmd="fneighbor", **kwargs):
self.parameters = [
_Option(["-datafile", "datafile"],
"dist file to use (phylip)",
filename=True,
is_required=True),
_Option(["-matrixtype", "matrixtype"],
"is martrix [S]quare pr [u]pper or [l]ower"),
_Option(["-treetype", "treetype"],
"nj or UPGMA tree (n/u)"),
_Option(["-outgrno", "outgrno"],
"taxon to use as OG"),
_Option(["-jumble", "jumble"],
"randommise input order (Y/n)"),
_Option(["-seed", "seed"],
"provide a random seed"),
_Option(["-trout", "trout"],
"write tree (Y/n)"),
_Option(["-outtreefile", "outtreefile"],
"filename for output tree"),
_Option(["-progress", "progress"],
"print progress (Y/n)"),
_Option(["-treeprint", "treeprint"],
"print tree (Y/n)"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FSeqBootCommandline(_EmbossCommandLine):
"""Commandline object for the fseqboot program from EMBOSS.
fseqboot is an EMBOSS wrapper for the PHYLIP program seqboot used to
pseudo-sample alignment files.
"""
def __init__(self, cmd="fseqboot", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to sample (phylip)",
filename=True,
is_required=True),
_Option(["-categories", "catergories"],
"file of input categories"),
_Option(["-weights", "weights"],
" weights file"),
_Option(["-test", "test"],
"specify operation, default is bootstrap"),
_Option(["-regular", "regular"],
"absolute number to resample"),
_Option(["-fracsample", "fracsample"],
"fraction to resample"),
_Option(["-rewriteformat", "rewriteformat"],
"output format ([P]hyilp, [n]exus, [x]ml"),
_Option(["-seqtype", "seqtype"],
"output format ([D]na, [p]rotein, [r]na"),
_Option(["-blocksize", "blocksize"],
"print progress (Y/n)"),
_Option(["-reps", "reps"],
"how many replicates, defaults to 100)"),
_Option(["-justweights", "jusweights"],
"what to write out [D]atasets of just [w]eights"),
_Option(["-seed", "seed"],
"specify random seed"),
_Option(["-dotdiff", "dotdiff"],
"Use dot-differencing? [Y/n]"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FDNAParsCommandline(_EmbossCommandLine):
"""Commandline object for the fdnapars program from EMBOSS.
fdnapars is an EMBOSS version of the PHYLIP program dnapars, for
estimating trees from DNA sequences using parsimony. Calling this command
without providing a value for the option "-intreefile" will invoke
"interactive mode" (and as a result fail if called with subprocess) if
"-auto" is not set to true.
"""
def __init__(self, cmd="fdnapars", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-intreefile", "intreefile"],
"Phylip tree file"),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-maxtrees", "maxtrees"],
"max trees to save during run"),
_Option(["-thorough", "thorough"],
"more thorough search (Y/n)"),
_Option(["-rearrange", "rearrange"],
"Rearrange on just 1 best tree (Y/n)"),
_Option(["-transversion", "transversion"],
"Use tranversion parsimony (y/N)"),
_Option(["-njumble", "njumble"],
"number of times to randomise input order (default is 0)"),
_Option(["-seed", "seed"],
"provide random seed"),
_Option(["-outgrno", "outgrno"],
"Specify outgroup"),
_Option(["-thresh", "thresh"],
"Use threshold parsimony (y/N)"),
_Option(["-threshold", "threshold"],
"Threshold value"),
_Option(["-trout", "trout"],
"Write trees to file (Y/n)"),
_Option(["-outtreefile", "outtreefile"],
"filename for output tree"),
_Option(["-dotdiff", "dotdiff"],
"Use dot-differencing? [Y/n]"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FProtParsCommandline(_EmbossCommandLine):
"""Commandline object for the fdnapars program from EMBOSS.
fprotpars is an EMBOSS version of the PHYLIP program protpars, for
estimating trees from protein sequences using parsiomny. Calling this
command without providing a value for the option "-intreefile" will invoke
"interactive mode" (and as a result fail if called with subprocess) if
"-auto" is not set to true.
"""
def __init__(self, cmd="fprotpars", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-intreefile", "intreefile"],
"Phylip tree file to score"),
_Option(["-outtreefile", "outtreefile"],
"phylip tree output file",
filename=True,
is_required=True),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-whichcode", "whichcode"],
"which genetic code, [U,M,V,F,Y]]"),
_Option(["-njumble", "njumble"],
"number of times to randomise input order (default is 0)"),
_Option(["-seed", "seed"],
"provide random seed"),
_Option(["-outgrno", "outgrno"],
"Specify outgroup"),
_Option(["-thresh", "thresh"],
"Use threshold parsimony (y/N)"),
_Option(["-threshold", "threshold"],
"Threshold value"),
_Option(["-trout", "trout"],
"Write trees to file (Y/n)"),
_Option(["-dotdiff", "dotdiff"],
"Use dot-differencing? [Y/n]"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FProtDistCommandline(_EmbossCommandLine):
"""Commandline object for the fprotdist program from EMBOSS.
fprotdist is an EMBOSS wrapper for the PHYLIP program protdist used to
calculate distance matrices from protein sequences.
"""
def __init__(self, cmd="fprotdist", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-ncategories", "ncategories"],
"number of rate catergories (1-9)"),
_Option(["-rate", "rate"],
"rate for each category"),
_Option(["-catergories", "catergories"],
"file of rates"),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-method", "method"],
"sub. model [j,h,d,k,s,c]"),
_Option(["-gamma", "gamma"],
"gamma [g, i,c]"),
_Option(["-gammacoefficient", "gammacoefficient"],
"value for gamma (> 0.001)"),
_Option(["-invarcoefficient", "invarcoefficient"],
"float for variation of substitution rate among sites"),
_Option(["-aacateg", "aacateg"],
"Choose the category to use [G,C,H]"),
_Option(["-whichcode", "whichcode"],
"genetic code [c,m,v,f,y]"),
_Option(["-ease", "ease"],
"Pob change catergory (float between -0 and 1)"),
_Option(["-ttratio", "ttratio"],
"Transition/transversion ratio (0-1)"),
_Option(["-basefreq", "basefreq"],
"DNA base frequencies (space separated list)"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FConsenseCommandline(_EmbossCommandLine):
"""Commandline object for the fconsense program from EMBOSS.
fconsense is an EMBOSS wrapper for the PHYLIP program consense used to
calculate consensus trees.
"""
def __init__(self, cmd="fconsense", **kwargs):
self.parameters = [
_Option(["-intreefile", "intreefile"],
"file with phylip trees to make consensus from",
filename=True,
is_required=True),
_Option(["-method", "method"],
"consensus method [s, mr, MRE, ml]"),
_Option(["-mlfrac", "mlfrac"],
"cut-off freq for branch to appear in consensus (0.5-1.0)"),
_Option(["-root", "root"],
"treat trees as rooted (YES, no)"),
_Option(["-outgrno", "outgrno"],
"OTU to use as outgroup (starts from 0)"),
_Option(["-trout", "trout"],
"treat trees as rooted (YES, no)"),
_Option(["-outtreefile", "outtreefile"],
"Phylip tree output file (optional)"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class WaterCommandline(_EmbossCommandLine):
"""Commandline object for the water program from EMBOSS.
"""
def __init__(self, cmd="water", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Switch(["-nobrief", "nobrief"],
"Display extended identity and similarity"),
_Switch(["-brief", "brief"],
"Display brief identity and similarity"),
_Option(["-similarity", "similarity"],
"Display percent identity and similarity"),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
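# Editor's sketch: typical construction of a water command line. The
# filenames and penalty values are assumptions for illustration; str(cline)
# renders the command, while cline() would run it via subprocess.
def _example_water_usage():
    """Illustrative only: build a Smith-Waterman alignment command."""
    cline = WaterCommandline(asequence="alpha.faa", bsequence="beta.faa",
                             gapopen=10, gapextend=0.5,
                             outfile="water.txt", auto=True)
    return str(cline)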
class NeedleCommandline(_EmbossCommandLine):
"""Commandline object for the needle program from EMBOSS."""
def __init__(self, cmd="needle", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Option(["-endweight", "endweight"],
"Apply And gap penalties"),
_Option(["-endopen", "endopen"],
"The score taken away when an end gap is created."),
_Option(["-endextend", "endextend"],
"The score added to the end gap penality for each base or "
"residue in the end gap."),
_Switch(["-nobrief", "nobrief"],
"Display extended identity and similarity"),
_Switch(["-brief", "brief"],
"Display brief identity and similarity"),
_Option(["-similarity", "similarity"],
"Display percent identity and similarity"),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class NeedleallCommandline(_EmbossCommandLine):
"""Commandline object for the needleall program from EMBOSS."""
def __init__(self, cmd="needleall", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Option(["-minscore", "minscore"],
"Exclude alignments with scores below this threshold score."),
_Option(["-errorfile", "errorfile"],
"Error file to be written to."),
_Option(["-endweight", "endweight"],
"Apply And gap penalties"),
_Option(["-endopen", "endopen"],
"The score taken away when an end gap is created."),
_Option(["-endextend", "endextend"],
"The score added to the end gap penality for each base or "
"residue in the end gap."),
_Switch(["-nobrief", "nobrief"],
"Display extended identity and similarity"),
_Switch(["-brief", "brief"],
"Display brief identity and similarity"),
_Option(["-similarity", "similarity"],
"Display percent identity and similarity"),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class StretcherCommandline(_EmbossCommandLine):
"""Commandline object for the stretcher program from EMBOSS."""
def __init__(self, cmd="stretcher", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True,
checker_function=lambda value: isinstance(value, int)),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True,
checker_function=lambda value: isinstance(value, int)),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FuzznucCommandline(_EmbossCommandLine):
"""Commandline object for the fuzznuc program from EMBOSS."""
def __init__(self, cmd="fuzznuc", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence database USA",
is_required=True),
_Option(["-pattern", "pattern"],
"Search pattern, using standard IUPAC one-letter codes",
is_required=True),
_Option(["-mismatch", "mismatch"],
"Number of mismatches",
is_required=True),
_Option(["-complement", "complement"],
"Search complementary strand"),
_Option(["-rformat", "rformat"],
"Specify the report format to output in."),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
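# Editor's sketch: a fuzznuc pattern search. The FASTA filename and the
# EcoRI-site pattern are assumptions for illustration only.
def _example_fuzznuc_usage():
    """Illustrative only: search a FASTA file for GAATTC with no mismatches."""
    cline = FuzznucCommandline(sequence="genome.fasta", pattern="GAATTC",
                               mismatch=0, outfile="fuzznuc.txt", auto=True)
    return str(cline)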
class Est2GenomeCommandline(_EmbossCommandLine):
"""Commandline object for the est2genome program from EMBOSS."""
def __init__(self, cmd="est2genome", **kwargs):
self.parameters = [
_Option(["-est", "est"],
"EST sequence(s)",
is_required=True),
_Option(["-genome", "genome"],
"Genomic sequence",
is_required=True),
_Option(["-match", "match"],
"Score for matching two bases"),
_Option(["-mismatch", "mismatch"],
"Cost for mismatching two bases"),
_Option(["-gappenalty", "gappenalty"],
"Cost for deleting a single base in either sequence, "
"excluding introns"),
_Option(["-intronpenalty", "intronpenalty"],
"Cost for an intron, independent of length."),
_Option(["-splicepenalty", "splicepenalty"],
"Cost for an intron, independent of length "
"and starting/ending on donor-acceptor sites"),
_Option(["-minscore", "minscore"],
"Exclude alignments with scores below this threshold score."),
_Option(["-reverse", "reverse"],
"Reverse the orientation of the EST sequence"),
_Option(["-splice", "splice"],
"Use donor and acceptor splice sites."),
_Option(["-mode", "mode"],
"This determines the comparion mode. 'both', 'forward' "
"'reverse'"),
_Option(["-best", "best"],
"You can print out all comparisons instead of just the best"),
_Option(["-space", "space"],
"for linear-space recursion."),
_Option(["-shuffle", "shuffle"],
"Shuffle"),
_Option(["-seed", "seed"],
"Random number seed"),
_Option(["-align", "align"],
"Show the alignment."),
_Option(["-width", "width"],
"Alignment width"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class ETandemCommandline(_EmbossCommandLine):
"""Commandline object for the etandem program from EMBOSS."""
def __init__(self, cmd="etandem", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence",
filename=True,
is_required=True),
_Option(["-minrepeat", "minrepeat"],
"Minimum repeat size",
is_required=True),
_Option(["-maxrepeat", "maxrepeat"],
"Maximum repeat size",
is_required=True),
_Option(["-threshold", "threshold"],
"Threshold score"),
_Option(["-mismatch", "mismatch"],
"Allow N as a mismatch"),
_Option(["-uniform", "uniform"],
"Allow uniform consensus"),
_Option(["-rformat", "rformat"],
"Output report format"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class EInvertedCommandline(_EmbossCommandLine):
"""Commandline object for the einverted program from EMBOSS."""
def __init__(self, cmd="einverted", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence",
filename=True,
is_required=True),
_Option(["-gap", "gap"],
"Gap penalty",
filename=True,
is_required=True),
_Option(["-threshold", "threshold"],
"Minimum score threshold",
is_required=True),
_Option(["-match", "match"],
"Match score",
is_required=True),
_Option(["-mismatch", "mismatch"],
"Mismatch score",
is_required=True),
_Option(["-maxrepeat", "maxrepeat"],
"Maximum separation between the start and end of repeat"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class PalindromeCommandline(_EmbossCommandLine):
"""Commandline object for the palindrome program from EMBOSS."""
def __init__(self, cmd="palindrome", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence",
filename=True,
is_required=True),
_Option(["-minpallen", "minpallen"],
"Minimum palindrome length",
is_required=True),
_Option(["-maxpallen", "maxpallen"],
"Maximum palindrome length",
is_required=True),
_Option(["-gaplimit", "gaplimit"],
"Maximum gap between repeats",
is_required=True),
_Option(["-nummismatches", "nummismatches"],
"Number of mismatches allowed",
is_required=True),
_Option(["-overlap", "overlap"],
"Report overlapping matches",
is_required=True),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class TranalignCommandline(_EmbossCommandLine):
"""Commandline object for the tranalign program from EMBOSS."""
def __init__(self, cmd="tranalign", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"Nucleotide sequences to be aligned.",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Protein sequence alignment",
filename=True,
is_required=True),
_Option(["-outseq", "outseq"],
"Output sequence file.",
filename=True,
is_required=True),
_Option(["-table", "table"],
"Code to use"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class DiffseqCommandline(_EmbossCommandLine):
"""Commandline object for the diffseq program from EMBOSS."""
def __init__(self, cmd="diffseq", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to compare",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to compare",
filename=True,
is_required=True),
_Option(["-wordsize", "wordsize"],
"Word size to use for comparisons (10 default)",
is_required=True),
_Option(["-aoutfeat", "aoutfeat"],
"File for output of first sequence's features",
filename=True,
is_required=True),
_Option(["-boutfeat", "boutfeat"],
"File for output of second sequence's features",
filename=True,
is_required=True),
_Option(["-rformat", "rformat"],
"Output report file format")
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class IepCommandline(_EmbossCommandLine):
"""Commandline for EMBOSS iep: calculated isoelectric point and charge.
Example:
>>> from Bio.Emboss.Applications import IepCommandline
>>> iep_cline = IepCommandline(sequence="proteins.faa",
... outfile="proteins.txt")
>>> print(iep_cline)
iep -outfile=proteins.txt -sequence=proteins.faa
You would typically run the command line with iep_cline() or via the
Python subprocess module, as described in the Biopython tutorial.
"""
def __init__(self, cmd="iep", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Protein sequence(s) filename",
filename=True,
is_required=True),
_Option(["-amino", "amino"],
"""Number of N-termini
Integer 0 (default) or more.
"""),
_Option(["-carboxyl", "carboxyl"],
"""Number of C-termini
Integer 0 (default) or more.
"""),
_Option(["-lysinemodified", "lysinemodified"],
"""Number of modified lysines
Integer 0 (default) or more.
"""),
_Option(["-disulphides", "disulphides"],
"""Number of disulphide bridges
Integer 0 (default) or more.
"""),
# Should we implement the -termini switch as well?
_Option(["-notermini", "notermini"],
"Exclude (True) or include (False) charge at N and C terminus."),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
# seqret uses -outseq, not -outfile, so use the base class:
class SeqretCommandline(_EmbossMinimalCommandLine):
"""Commandline object for the seqret program from EMBOSS.
This tool allows you to interconvert between different sequence file
formats (e.g. GenBank to FASTA). Combining Biopython's Bio.SeqIO module
with seqret using a suitable intermediate file format can allow you to
read/write to an even wider range of file formats.
This wrapper currently only supports the core functionality, things like
feature tables (in EMBOSS 6.1.0 onwards) are not yet included.
"""
def __init__(self, cmd="seqret", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Input sequence(s) filename",
filename=True),
_Option(["-outseq", "outseq"],
"Output sequence file.",
filename=True),
_Option(["-sformat", "sformat"],
"Input sequence(s) format (e.g. fasta, genbank)"),
_Option(["-osformat", "osformat"],
"Output sequence(s) format (e.g. fasta, genbank)"),
]
_EmbossMinimalCommandLine.__init__(self, cmd, **kwargs)
def _validate(self):
# Check the outfile, filter, or stdout option has been set.
# We can't simply do this via the required flag for the outfile
# output - this seems the simplest solution.
if not (self.outseq or self.filter or self.stdout):
raise ValueError("You must either set outfile (output filename), "
"or enable filter or stdout (output to stdout).")
if not (self.sequence or self.filter or self.stdin):
raise ValueError("You must either set sequence (input filename), "
"or enable filter or stdin (input from stdin).")
return _EmbossMinimalCommandLine._validate(self)
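# Editor's sketch: converting GenBank to FASTA with seqret. The filenames
# are assumptions; note that seqret takes -outseq rather than -outfile,
# which is why this class builds on _EmbossMinimalCommandLine.
def _example_seqret_usage():
    """Illustrative only: render a GenBank-to-FASTA conversion command."""
    cline = SeqretCommandline(sequence="input.gbk", outseq="output.fasta",
                              sformat="genbank", osformat="fasta", auto=True)
    return str(cline)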
class SeqmatchallCommandline(_EmbossCommandLine):
""" Commandline object for the seqmatchall program from EMBOSS
e.g.
>>> cline = SeqmatchallCommandline(sequence="opuntia.fasta", outfile="opuntia.txt")
>>> cline.auto = True
>>> cline.wordsize = 18
>>> cline.aformat = "pair"
>>> print(cline)
seqmatchall -auto -outfile=opuntia.txt -sequence=opuntia.fasta -wordsize=18 -aformat=pair
"""
def __init__(self, cmd="seqmatchall", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Readable set of sequences",
filename=True,
is_required=True),
_Option(["-wordsize", "wordsize"],
"Word size (Integer 2 or more, default 4)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
zjuchenyuan/BioWeb
|
Lib/Bio/Emboss/Applications.py
|
Python
|
mit
| 55,897
|
[
"Biopython"
] |
9da6f037dee1f5eabd293cd6377a0f29bcd56f923c801599000b362a00d8dfc1
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from util import *
from pattern import graph
from pattern.graph import commonsense
#-------------------------------------------------------------------------
class TestUtilityFunctions(unittest.TestCase):
def setUp(self):
pass
def test_deepcopy(self):
# Objects with a copy() method are responsible for deep-copying
# themselves.
class MyObject:
def __init__(self, i):
self.i = i
def copy(self):
return MyObject(graph.deepcopy(self.i))
# Assert deep copy for different types.
for o1 in (
None, True, False,
"a", u"a",
1, 1.0, complex(1),
list([1]), tuple([1]), set([1]), frozenset([1]),
dict(a=1), {frozenset(["a"]): 1}, {MyObject(1): 1},
MyObject(1)):
o2 = graph.deepcopy(o1)
if isinstance(o2, (list, tuple, set, dict, MyObject)):
self.assertTrue(id(o1) != id(o2))
print("pattern.graph.deepcopy()")
def test_unique(self):
# Assert list copy with unique items.
v = graph.unique([1, 1, 1])
self.assertEqual(len(v), 1)
self.assertEqual(v[0], 1)
print("pattern.graph.unique()")
def test_coordinates(self):
# Assert 2D coordinates.
x, y = graph.coordinates(10, 10, 100, 30)
self.assertAlmostEqual(x, 96.60, places=2)
self.assertAlmostEqual(y, 60.00, places=2)
print("pattern.graph.coordinates()")
#-------------------------------------------------------------------------
class TestNode(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_node("a", radius=5, stroke=(
0, 0, 0, 1), strokewidth=1, fill=None, text=(0, 0, 0, 1))
self.g.add_node("b", radius=5)
self.g.add_node("c", radius=5)
self.g.add_edge("a", "b")
self.g.add_edge("b", "c")
def test_node(self):
# Assert node properties.
n = self.g["a"]
self.assertTrue(isinstance(n, graph.Node))
self.assertTrue(n == self.g["a"])
self.assertTrue(n != self.g["b"])
self.assertTrue(n.graph == self.g)
self.assertTrue(n._distance == self.g.distance)
self.assertTrue(n.id == "a")
self.assertTrue(n.x == 0.0)
self.assertTrue(n.y == 0.0)
self.assertTrue(n.force.x == graph.Vector(0.0, 0.0).x)
self.assertTrue(n.force.y == graph.Vector(0.0, 0.0).y)
self.assertTrue(n.radius == 5)
self.assertTrue(n.fill == None)
self.assertTrue(n.stroke == (0, 0, 0, 1))
self.assertTrue(n.strokewidth == 1)
self.assertTrue(n.text.string == u"a")
self.assertTrue(n.text.width == 85)
self.assertTrue(n.text.fill == (0, 0, 0, 1))
self.assertTrue(n.text.fontsize == 11)
self.assertTrue(n.fixed == False)
self.assertTrue(n.weight == 0)
self.assertTrue(n.centrality == 0)
print("pattern.graph.Node")
def test_edge(self):
# Assert node edges.
n1 = self.g["a"]
n2 = self.g["b"]
self.assertTrue(n1.edges[0].node1.id == "a")
self.assertTrue(n1.edges[0].node2.id == "b")
self.assertTrue(n1.links[0].id == "b")
self.assertTrue(n1.links[0] == self.g.edges[0].node2)
self.assertTrue(n1.links.edge("b") == self.g.edges[0])
self.assertTrue(n1.links.edge(n2) == self.g.edges[0])
print("pattern.graph.Node.links")
print("pattern.graph.Node.edges")
def test_flatten(self):
# Assert node spreading activation.
n = self.g["a"]
self.assertTrue(set(n.flatten(depth=0)) == set([n]))
self.assertTrue(set(n.flatten(depth=1)) == set([n, n.links[0]]))
self.assertTrue(set(n.flatten(depth=2)) == set(self.g.nodes))
print("pattern.graph.Node.flatten()")
def test_text(self):
n = self.g.add_node("d", text=None)
self.assertTrue(n.text == None)
print("pattern.graph.Node.text")
#-------------------------------------------------------------------------
class TestEdge(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_node("a")
self.g.add_node("b")
self.g.add_edge("a", "b", weight=0.0, length=1.0,
type="is-a", stroke=(0, 0, 0, 1), strokewidth=1)
def test_edge(self):
# Assert edge properties.
e = self.g.edges[0]
self.assertTrue(isinstance(e, graph.Edge))
self.assertTrue(e.node1 == self.g["a"])
self.assertTrue(e.node2 == self.g["b"])
self.assertTrue(e.weight == 0.0)
self.assertTrue(e.length == 1.0)
self.assertTrue(e.type == "is-a")
self.assertTrue(e.stroke == (0, 0, 0, 1))
self.assertTrue(e.strokewidth == 1)
print("pattern.graph.Edge")
#-------------------------------------------------------------------------
class TestGraph(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph(layout=graph.SPRING, distance=10.0)
self.g.add_node("a")
self.g.add_node("b")
self.g.add_node("c")
self.g.add_edge("a", "b")
self.g.add_edge("b", "c")
def test_graph(self):
# Assert graph properties.
g = self.g.copy()
self.assertTrue(len(g.nodes) == 3)
self.assertTrue(len(g.edges) == 2)
self.assertTrue(g.distance == 10.0)
self.assertTrue(g.density == 2 / 3.0)
self.assertTrue(g.is_complete == False)
self.assertTrue(g.is_sparse == False)
self.assertTrue(g.is_dense == True)
self.assertTrue(g._adjacency == None)
self.assertTrue(isinstance(g.layout, graph.GraphLayout))
self.assertTrue(isinstance(g.layout, graph.GraphSpringLayout))
print("pattern.graph.Graph")
def test_graph_nodes(self):
# Assert graph nodes.
g = self.g.copy()
g.append(graph.Node, "d")
g.add_node("e", base=graph.Node, root=True)
self.assertTrue("d" in g)
self.assertTrue("e" in g)
self.assertTrue(g.root == g["e"])
self.assertTrue(g["e"] == g.node("e") == g.nodes[-1])
g.remove(g["d"])
g.remove(g["e"])
self.assertTrue("d" not in g)
self.assertTrue("e" not in g)
print("pattern.graph.Graph.add_node()")
def test_graph_edges(self):
# Assert graph edges.
g = self.g.copy()
v1 = g.add_edge("d", "e") # Automatically create Node(d) and Node(e).
v2 = g.add_edge("d", "e") # Yields existing edge.
v3 = g.add_edge("e", "d") # Opposite direction.
self.assertEqual(v1, v2)
self.assertEqual(v2, g.edge("d", "e"))
self.assertEqual(v3, g.edge("e", "d"))
self.assertEqual(g["d"].links.edge(g["e"]), v2)
self.assertEqual(g["e"].links.edge(g["d"]), v3)
g.remove(g["d"])
g.remove(g["e"])
# Edges d->e and e->d should now be removed automatically.
self.assertEqual(len(g.edges), 2)
print("pattern.graph.Graph.add_edge()")
def test_cache(self):
# Assert adjacency cache is flushed when nodes, edges or direction
# changes.
g = self.g.copy()
g.eigenvector_centrality()
self.assertEqual(g._adjacency[0]["a"], {})
self.assertEqual(g._adjacency[0]["b"]["a"], 1.0)
g.add_node("d")
g.add_node("e")
self.assertEqual(g._adjacency, None)
g.betweenness_centrality()
self.assertEqual(g._adjacency[0]["a"]["b"], 1.0)
self.assertEqual(g._adjacency[0]["b"]["a"], 1.0)
g.add_edge("d", "e", weight=0.0)
g.remove(g.node("d"))
g.remove(g.node("e"))
print("pattern.graph.Graph._adjacency")
def test_paths(self):
# Assert node paths.
g = self.g.copy()
self.assertEqual(g.paths("a", "c"), g.paths(g["a"], g["c"]))
self.assertEqual(g.paths("a", "c"), [[g["a"], g["b"], g["c"]]])
self.assertEqual(g.paths("a", "c", length=2), [])
# Assert node shortest paths.
g.add_edge("a", "c")
self.assertEqual(g.paths("a", "c", length=2), [[g["a"], g["c"]]])
self.assertEqual(g.shortest_path("a", "c"), [g["a"], g["c"]])
self.assertEqual(g.shortest_path("c", "a"), [g["c"], g["a"]])
self.assertEqual(g.shortest_path("c", "a", directed=True), None)
g.remove(g.edge("a", "c"))
g.add_node("d")
self.assertEqual(g.shortest_path("a", "d"), None)
self.assertEqual(g.shortest_paths("a")["b"], [g["a"], g["b"]])
self.assertEqual(g.shortest_paths("a")["c"], [g["a"], g["b"], g["c"]])
self.assertEqual(g.shortest_paths("a")["d"], None)
self.assertEqual(g.shortest_paths("c", directed=True)["a"], None)
g.remove(g["d"])
print("pattern.graph.Graph.paths()")
print("pattern.graph.Graph.shortest_path()")
print("pattern.graph.Graph.shortest_paths()")
def test_eigenvector_centrality(self):
# Assert eigenvector centrality.
self.assertEqual(self.g["a"]._weight, None)
v = self.g.eigenvector_centrality()
self.assertTrue(isinstance(v["a"], float))
self.assertTrue(v["a"] == v[self.g.node("a")])
self.assertTrue(v["a"] < v["c"])
self.assertTrue(v["b"] < v["c"])
print("pattern.graph.Graph.eigenvector_centrality()")
def test_betweenness_centrality(self):
# Assert betweenness centrality.
self.assertEqual(self.g["a"]._centrality, None)
v = self.g.betweenness_centrality()
self.assertTrue(isinstance(v["a"], float))
self.assertTrue(v["a"] == v[self.g.node("a")])
self.assertTrue(v["a"] < v["b"])
self.assertTrue(v["c"] < v["b"])
print("pattern.graph.Graph.betweenness_centrality()")
def test_sorted(self):
# Assert graph node sorting
o1 = self.g.sorted(order=graph.WEIGHT, threshold=0.0)
o2 = self.g.sorted(order=graph.CENTRALITY, threshold=0.0)
self.assertEqual(o1[0], self.g["c"])
self.assertEqual(o2[0], self.g["b"])
print("pattern.graph.Graph.sorted()")
def test_prune(self):
# Assert leaf pruning.
g = self.g.copy()
g.prune(1)
self.assertEqual(len(g), 1)
self.assertEqual(g.nodes, [g["b"]])
print("pattern.graph.Graph.prune()")
def test_fringe(self):
# Assert leaf fetching.
g = self.g.copy()
self.assertEqual(g.fringe(0), [g["a"], g["c"]])
# FIXME the ordering is variable in python3
self.assertEqual(set(g.fringe(1)), set([g["a"], g["b"], g["c"]]))
print("pattern.graph.Graph.fringe()")
def test_split(self):
# Assert subgraph splitting.
self.assertTrue(isinstance(self.g.split(), list))
self.assertTrue(isinstance(self.g.split()[0], graph.Graph))
print("pattern.graph.Graph.split()")
def test_update(self):
# Assert node position after updating layout algorithm.
self.g.update()
for n in self.g.nodes:
self.assertTrue(n.x != 0)
self.assertTrue(n.y != 0)
self.g.layout.reset()
for n in self.g.nodes:
self.assertTrue(n.x == 0)
self.assertTrue(n.y == 0)
print("pattern.graph.Graph.update()")
def test_copy(self):
# Assert deep copy of Graph.
g1 = self.g
g2 = self.g.copy()
self.assertTrue(set(g1) == set(g2)) # Same node id's.
self.assertTrue(id(g1["a"]) != id(g2["b"])) # Different node objects.
g3 = self.g.copy(nodes=[self.g["a"], self.g["b"]])
g3 = self.g.copy(nodes=["a", "b"])
        self.assertEqual(len(g3.nodes), 2)
        self.assertEqual(len(g3.edges), 1)
# Assert copy with subclasses of Node and Edge.
class MyNode(graph.Node):
pass
class MyEdge(graph.Edge):
pass
g4 = graph.Graph()
g4.append(MyNode, "a")
g4.append(MyNode, "b")
g4.append(MyEdge, "a", "b")
g4 = g4.copy()
self.assertTrue(isinstance(g4.nodes[0], MyNode))
self.assertTrue(isinstance(g4.edges[0], MyEdge))
print("pattern.graph.Graph.copy()")
#-------------------------------------------------------------------------
class TestGraphLayout(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph(layout=graph.SPRING, distance=10.0)
self.g.add_node("a")
self.g.add_node("b")
self.g.add_node("c")
self.g.add_edge("a", "b")
self.g.add_edge("b", "c")
def test_layout(self):
# Assert GraphLayout properties.
gl = graph.GraphLayout(graph=self.g)
self.assertTrue(gl.graph == self.g)
self.assertTrue(gl.bounds == (0, 0, 0, 0))
self.assertTrue(gl.iterations == 0)
gl.update()
self.assertTrue(gl.iterations == 1)
print("pattern.graph.GraphLayout")
class TestGraphSpringLayout(TestGraphLayout):
def test_layout(self):
# Assert GraphSpringLayout properties.
gl = self.g.layout
self.assertTrue(gl.graph == self.g)
self.assertTrue(gl.k == 4.0)
self.assertTrue(gl.force == 0.01)
self.assertTrue(gl.repulsion == 50)
self.assertTrue(gl.bounds == (0, 0, 0, 0))
self.assertTrue(gl.iterations == 0)
gl.update()
self.assertTrue(gl.iterations == 1)
self.assertTrue(gl.bounds[0] < 0)
self.assertTrue(gl.bounds[1] < 0)
self.assertTrue(gl.bounds[2] > 0)
self.assertTrue(gl.bounds[3] > 0)
print("pattern.graph.GraphSpringLayout")
def test_distance(self):
# Assert 2D distance.
n1 = graph.Node()
n2 = graph.Node()
n1.x = -100
n2.x = +100
d = self.g.layout._distance(n1, n2)
self.assertEqual(d, (200.0, 0.0, 200.0, 40000.0))
print("pattern.graph.GraphSpringLayout._distance")
def test_repulsion(self):
# Assert repulsive node force.
gl = self.g.layout
d1 = gl._distance(self.g["a"], self.g["c"])[2]
gl.update()
d2 = gl._distance(self.g["a"], self.g["c"])[2]
self.assertTrue(d2 > d1)
self.g.layout.reset()
print("pattern.graph.GraphSpringLayout._repulse()")
def test_attraction(self):
# Assert attractive edge force.
gl = self.g.layout
self.g["a"].x = -100
self.g["b"].y = +100
d1 = gl._distance(self.g["a"], self.g["b"])[2]
gl.update()
d2 = gl._distance(self.g["a"], self.g["b"])[2]
self.assertTrue(d2 < d1)
print("pattern.graph.GraphSpringLayout._attract()")
#-------------------------------------------------------------------------
class TestGraphTraversal(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_edge("a", "b", weight=0.5)
self.g.add_edge("a", "c")
self.g.add_edge("b", "d")
self.g.add_edge("d", "e")
self.g.add_node("x")
def test_search(self):
# Assert depth-first vs. breadth-first search.
def visit(node):
a.append(node)
        def traversable(node, edge):
            if edge.node2.id == "e":
                return False
            return True
g = self.g
a = []
graph.depth_first_search(g["a"], visit, traversable)
self.assertEqual(a, [g["a"], g["b"], g["d"], g["c"]])
a = []
graph.breadth_first_search(g["a"], visit, traversable)
self.assertEqual(a, [g["a"], g["b"], g["c"], g["d"]])
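        # DFS descends from "b" into "d" before visiting "c", while BFS
        # visits both direct neighbors ("b", "c") before "d"; "e" is absent
        # from both orders because traversable() blocks edges into it.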
print("pattern.graph.depth_first_search()")
print("pattern.graph.breadth_first_search()")
def test_paths(self):
# Assert depth-first all paths.
g = self.g.copy()
g.add_edge("a", "d")
for id1, id2, length, path in (
("a", "a", 1, [["a"]]),
("a", "d", 3, [["a", "d"], ["a", "b", "d"]]),
("a", "d", 2, [["a", "d"]]),
("a", "d", 1, []),
("a", "x", 1, [])):
p = graph.paths(g, id1, id2, length)
self.assertEqual(p, path)
print("pattern.graph.paths()")
def test_edges(self):
# Assert path of nodes to edges.
g = self.g
p = [g["a"], g["b"], g["d"], g["x"]]
e = list(graph.edges(p))
self.assertEqual(e, [g.edge("a", "b"), g.edge("b", "d"), None])
print("pattern.graph.edges()")
def test_adjacency(self):
# Assert adjacency map with different settings.
a = [
graph.adjacency(self.g),
graph.adjacency(self.g, directed=True),
graph.adjacency(self.g, directed=True, reversed=True),
graph.adjacency(self.g, stochastic=True),
graph.adjacency(self.g, heuristic=lambda id1, id2: 0.1),
]
for i in range(len(a)):
a[i] = sorted((id1, sorted((id2, round(w, 2))
for id2, w in p.items())) for id1, p in a[i].items())
self.assertEqual(a[0], [
("a", [("b", 0.75), ("c", 1.0)]),
("b", [("a", 0.75), ("d", 1.0)]),
("c", [("a", 1.0)]),
("d", [("b", 1.0), ("e", 1.0)]),
("e", [("d", 1.0)]),
("x", [])])
self.assertEqual(a[1], [
("a", [("b", 0.75), ("c", 1.0)]),
("b", [("d", 1.0)]),
("c", []),
("d", [("e", 1.0)]),
("e", []),
("x", [])])
self.assertEqual(a[2], [
("a", []),
("b", [("a", 0.75)]),
("c", [("a", 1.0)]),
("d", [("b", 1.0)]),
("e", [("d", 1.0)]),
("x", [])])
self.assertEqual(a[3], [
("a", [("b", 0.43), ("c", 0.57)]),
("b", [("a", 0.43), ("d", 0.57)]),
("c", [("a", 1.0)]),
("d", [("b", 0.5), ("e", 0.5)]),
("e", [("d", 1.0)]),
("x", [])])
self.assertEqual(a[4], [
("a", [("b", 0.85), ("c", 1.1)]),
("b", [("a", 0.85), ("d", 1.1)]),
("c", [("a", 1.1)]),
("d", [("b", 1.1), ("e", 1.1)]),
("e", [("d", 1.1)]),
("x", [])])
print("pattern.graph.adjacency()")
def test_dijkstra_shortest_path(self):
# Assert Dijkstra's algorithm (node1 -> node2).
g = self.g.copy()
g.add_edge("d", "a")
for id1, id2, heuristic, directed, path in (
("a", "d", None, False, ["a", "d"]),
("a", "d", None, True, ["a", "b", "d"]),
("a", "d", lambda id1, id2: id1 == "d" and id2 == "a" and 1 or 0, False, ["a", "b", "d"])):
p = graph.dijkstra_shortest_path(g, id1, id2, heuristic, directed)
self.assertEqual(p, path)
print("pattern.graph.dijkstra_shortest_path()")
def test_dijkstra_shortest_paths(self):
# Assert Dijkstra's algorithm (node1 -> all).
g = self.g.copy()
g.add_edge("d", "a")
a = [
graph.dijkstra_shortest_paths(g, "a"),
graph.dijkstra_shortest_paths(g, "a", directed=True),
graph.dijkstra_shortest_paths(
g, "a", heuristic=lambda id1, id2: id1 == "d" and id2 == "a" and 1 or 0)
]
for i in range(len(a)):
a[i] = sorted(a[i].items())
self.assertEqual(a[0], [
("a", ["a"]),
("b", ["a", "b"]),
("c", ["a", "c"]),
("d", ["a", "d"]),
("e", ["a", "d", "e"]),
("x", None)])
self.assertEqual(a[1], [
("a", ["a"]),
("b", ["a", "b"]),
("c", ["a", "c"]),
("d", ["a", "b", "d"]),
("e", ["a", "b", "d", "e"]),
("x", None)])
self.assertEqual(a[2], [
("a", ["a"]),
("b", ["a", "b"]),
("c", ["a", "c"]),
("d", ["a", "b", "d"]),
("e", ["a", "b", "d", "e"]),
("x", None)])
print("pattern.graph.dijkstra_shortest_paths()")
def test_floyd_warshall_all_pairs_distance(self):
# Assert all pairs path distance.
p1 = graph.floyd_warshall_all_pairs_distance(self.g)
p2 = sorted((id1, sorted((id2, round(w, 2))
for id2, w in p.items())) for id1, p in p1.items())
self.assertEqual(p2, [
("a", [("a", 0.00), ("b", 0.75),
("c", 1.00), ("d", 1.75), ("e", 2.75)]),
("b", [("a", 0.75), ("b", 0.00),
("c", 1.75), ("d", 1.00), ("e", 2.00)]),
("c", [("a", 1.00), ("b", 1.75),
("c", 2.00), ("d", 2.75), ("e", 3.75)]),
("d", [("a", 1.75), ("b", 1.00),
("c", 2.75), ("d", 0.00), ("e", 1.00)]),
("e", [("a", 2.75), ("b", 2.00),
("c", 3.75), ("d", 1.00), ("e", 2.00)]),
("x", [])])
# Assert predecessor tree.
self.assertEqual(
graph.predecessor_path(p1.predecessors, "a", "d"), ["a", "b", "d"])
print("pattern.graph.floyd_warshall_all_pairs_distance()")
#-------------------------------------------------------------------------
class TestGraphPartitioning(unittest.TestCase):
def setUp(self):
# Create test graph.
self.g = graph.Graph()
self.g.add_edge("a", "b", weight=0.5)
self.g.add_edge("a", "c")
self.g.add_edge("b", "d")
self.g.add_edge("d", "e")
self.g.add_edge("x", "y")
self.g.add_node("z")
def test_union(self):
self.assertEqual(graph.union([1, 2], [2, 3]), [1, 2, 3])
def test_intersection(self):
self.assertEqual(graph.intersection([1, 2], [2, 3]), [2])
def test_difference(self):
self.assertEqual(graph.difference([1, 2], [2, 3]), [1])
def test_partition(self):
# Assert unconnected subgraph partitioning.
g = graph.partition(self.g)
self.assertTrue(len(g) == 3)
self.assertTrue(isinstance(g[0], graph.Graph))
        self.assertEqual(sorted(g[0].keys()), ["a", "b", "c", "d", "e"])
        self.assertEqual(sorted(g[1].keys()), ["x", "y"])
        self.assertEqual(sorted(g[2].keys()), ["z"])
print("pattern.graph.partition()")
def test_clique(self):
# Assert node cliques.
v = graph.clique(self.g, "a")
self.assertEqual(v, ["a", "b"])
self.g.add_edge("b", "c")
v = graph.clique(self.g, "a")
self.assertEqual(v, ["a", "b", "c"])
v = graph.cliques(self.g, 2)
self.assertEqual(
v, [["a", "b", "c"], ["b", "d"], ["d", "e"], ["x", "y"]])
print("pattern.graph.clique()")
print("pattern.graph.cliques()")
#-------------------------------------------------------------------------
class TestGraphMaintenance(unittest.TestCase):
def setUp(self):
pass
def test_unlink(self):
# Assert remove all edges to/from Node(a).
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("a", "c")
graph.unlink(g, g["a"])
self.assertTrue(len(g.edges) == 0)
# Assert remove edges between Node(a) and Node(b)
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("a", "c")
graph.unlink(g, g["a"], "b")
self.assertTrue(len(g.edges) == 1)
print("pattern.graph.unlink()")
def test_redirect(self):
# Assert transfer connections of Node(a) to Node(d).
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("c", "a")
g.add_node("d")
graph.redirect(g, g["a"], "d")
self.assertTrue(len(g["a"].edges) == 0)
self.assertTrue(len(g["d"].edges) == 2)
self.assertTrue(g.edge("d", "c").node1 == g["c"])
print("pattern.graph.redirect()")
def test_cut(self):
# Assert unlink Node(b) and redirect a->c and a->d.
g = graph.Graph()
g.add_edge("a", "b")
g.add_edge("b", "c")
g.add_edge("b", "d")
graph.cut(g, g["b"])
self.assertTrue(len(g["b"].edges) == 0)
self.assertTrue(g.edge("a", "c") is not None)
self.assertTrue(g.edge("a", "d") is not None)
print("pattern.graph.cut()")
def test_insert(self):
g = graph.Graph()
g.add_edge("a", "b")
g.add_node("c")
graph.insert(g, g["c"], g["a"], g["b"])
self.assertTrue(g.edge("a", "b") is None)
self.assertTrue(g.edge("a", "c") is not None)
self.assertTrue(g.edge("c", "b") is not None)
print("pattern.graph.insert()")
#-------------------------------------------------------------------------
class TestGraphCommonsense(unittest.TestCase):
def setUp(self):
pass
def test_halo(self):
# Assert concept halo (e.g., latent related concepts).
g = commonsense.Commonsense()
v = [concept.id for concept in g["rose"].halo]
self.assertTrue("red" in v)
self.assertTrue("romance" in v)
# Concept.properties is the list of properties (adjectives) in the
# halo.
v = g["rose"].properties
self.assertTrue("red" in v)
self.assertTrue("romance" not in v)
print("pattern.graph.commonsense.Concept.halo")
print("pattern.graph.commonsense.Concept.properties")
def test_field(self):
# Assert semantic field (e.g., concept taxonomy).
g = commonsense.Commonsense()
v = [concept.id for concept in g.field("color")]
self.assertTrue("red" in v)
self.assertTrue("green" in v)
self.assertTrue("blue" in v)
print("pattern.graph.commonsense.Commonsense.field()")
def test_similarity(self):
# Assert that tiger is more similar to lion than to spoon
# (which is common sense).
g = commonsense.Commonsense()
w1 = g.similarity("tiger", "lion")
w2 = g.similarity("tiger", "spoon")
self.assertTrue(w1 > w2)
print("pattern.graph.commonsense.Commonsense.similarity()")
#-------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
|
hayd/pattern
|
test/test_graph.py
|
Python
|
bsd-3-clause
| 26,561
|
[
"VisIt"
] |
48f2a3f099b31f79f5ffb2ae4ba5f243d6c3d0fb1379f9f0005422bd3216e922
|
# Copyright 2003 by Sebastian Bassi. sbassi@genesdigitales.com
# All rights reserved. This code is part of the Biopython
# distribution and governed by its license.
# Please see the LICENSE file that should have been included as part
# of this package.
import warnings
warnings.warn("Bio.lcc is deprecated; it has been moved to Bio.SeqUtils.lcc instead", DeprecationWarning)
import math
from string import count
crom=0
compone=[0]
lccsal=[0]
def lcc_mult(seq,wsize,start,end):
"""Return a list called lccsal, the LCC, a complexity measure
from a sequence, called seq."""
l2=math.log(2)
tamseq=end-start
global compone
global lccsal
compone=[0]
lccsal=[0]
for i in range(wsize):
compone.append(((i+1)/float(wsize))*((math.log((i+1)/float(wsize)))/l2))
window=seq[0:wsize]
cant_a=count(window,'A')
cant_c=count(window,'C')
cant_t=count(window,'T')
cant_g=count(window,'G')
term_a=compone[cant_a]
term_c=compone[cant_c]
term_t=compone[cant_t]
term_g=compone[cant_g]
lccsal[0]=(-(term_a+term_c+term_t+term_g))
tail=seq[0]
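    # Slide the window one position at a time, updating the per-base counts
    # incrementally: decrement the count of the base leaving the window
    # (tail) and increment the count of the base entering it (window[-1]),
    # instead of recounting the whole window.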
for x in range (tamseq-wsize):
window=seq[x+1:wsize+x+1]
if tail==window[-1]:
lccsal.append(lccsal[-1])
#break
elif tail=='A':
cant_a=cant_a-1
if window[-1]=='C':
cant_c=cant_c+1
term_a=compone[cant_a]
term_c=compone[cant_c]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='T':
cant_t=cant_t+1
term_a=compone[cant_a]
term_t=compone[cant_t]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='G':
cant_g=cant_g+1
term_a=compone[cant_a]
term_g=compone[cant_g]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif tail=='C':
cant_c=cant_c-1
if window[-1]=='A':
cant_a=cant_a+1
term_a=compone[cant_a]
term_c=compone[cant_c]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='T':
cant_t=cant_t+1
term_c=compone[cant_c]
term_t=compone[cant_t]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='G':
cant_g=cant_g+1
term_c=compone[cant_c]
term_g=compone[cant_g]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif tail=='T':
cant_t=cant_t-1
if window[-1]=='A':
cant_a=cant_a+1
term_a=compone[cant_a]
term_t=compone[cant_t]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='C':
cant_c=cant_c+1
term_c=compone[cant_c]
term_t=compone[cant_t]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='G':
cant_g=cant_g+1
term_t=compone[cant_t]
term_g=compone[cant_g]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif tail=='G':
cant_g=cant_g-1
if window[-1]=='A':
cant_a=cant_a+1
term_a=compone[cant_a]
term_g=compone[cant_g]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='C':
cant_c=cant_c+1
term_c=compone[cant_c]
term_g=compone[cant_g]
lccsal.append(-(term_a+term_c+term_t+term_g))
elif window[-1]=='T':
cant_t=cant_t+1
term_t=compone[cant_t]
term_g=compone[cant_g]
lccsal.append(-(term_a+term_c+term_t+term_g))
tail=window[0]
return lccsal
def lcc_simp(seq,start,end):
"""Return LCC, a complexity measure from a sequence (seq.)"""
wsize=end-start
l2=math.log(2)
window=seq[start:end]
if count(window,'A')==0:
term_a=0
        # This check is useful to avoid computing the log of 0.
else:
term_a=((count(window,'A'))/float(wsize))*((math.log((count(window,'A'))/float(wsize)))/l2)
if count(window,'C')==0:
term_c=0
else:
term_c=((count(window,'C'))/float(wsize))*((math.log((count(window,'C'))/float(wsize)))/l2)
if count(window,'T')==0:
term_t=0
else:
term_t=((count(window,'T'))/float(wsize))*((math.log((count(window,'T'))/float(wsize)))/l2)
if count(window,'G')==0:
term_g=0
else:
term_g=((count(window,'G'))/float(wsize))*((math.log((count(window,'G'))/float(wsize)))/l2)
lccsal=-(term_a+term_c+term_t+term_g)
return lccsal
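# Illustration only, not part of the Bio.lcc API: a minimal re-sketch of the
# entropy formula used above for modern Pythons (str.count instead of the
# Python 2-only string.count helper).
def _lcc_sketch(window):
    """Return -sum((n_b/w) * log2(n_b/w)) over the bases A, C, T and G."""
    w = float(len(window))
    total = 0.0
    for base in 'ACTG':
        n = window.count(base)
        if n: # skip log(0), mirroring the checks in lcc_simp
            total += (n / w) * (math.log(n / w) / math.log(2))
    return -total
# For example, _lcc_sketch("ACGT") == 2.0, since each base contributes
# -(1/4) * log2(1/4) = 0.5.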
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/lcc.py
|
Python
|
apache-2.0
| 4,885
|
[
"Biopython"
] |
f896f414459740f07387e8ca2a2a5e6927e0596f876c9b5fb9e0bd0f68a30447
|
"""
Main module.
Implement the central Checker class.
Also, it models the Bindings and Scopes.
"""
import __future__
import ast
import bisect
import collections
import contextlib
import doctest
import functools
import os
import re
import string
import sys
import tokenize
from pyflakes import messages
PY2 = sys.version_info < (3, 0)
PY35_PLUS = sys.version_info >= (3, 5) # Python 3.5 and above
PY36_PLUS = sys.version_info >= (3, 6) # Python 3.6 and above
PY38_PLUS = sys.version_info >= (3, 8)
try:
sys.pypy_version_info
PYPY = True
except AttributeError:
PYPY = False
builtin_vars = dir(__import__('__builtin__' if PY2 else 'builtins'))
parse_format_string = string.Formatter().parse
if PY2:
tokenize_tokenize = tokenize.generate_tokens
else:
tokenize_tokenize = tokenize.tokenize
if PY2:
def getNodeType(node_class):
# workaround str.upper() which is locale-dependent
return str(unicode(node_class.__name__).upper())
def get_raise_argument(node):
return node.type
else:
def getNodeType(node_class):
return node_class.__name__.upper()
def get_raise_argument(node):
return node.exc
# Silence `pyflakes` from reporting `undefined name 'unicode'` in Python 3.
unicode = str
# Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally)
if PY2:
def getAlternatives(n):
if isinstance(n, (ast.If, ast.TryFinally)):
return [n.body]
if isinstance(n, ast.TryExcept):
return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
else:
def getAlternatives(n):
if isinstance(n, ast.If):
return [n.body]
if isinstance(n, ast.Try):
return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
if PY35_PLUS:
FOR_TYPES = (ast.For, ast.AsyncFor)
LOOP_TYPES = (ast.While, ast.For, ast.AsyncFor)
FUNCTION_TYPES = (ast.FunctionDef, ast.AsyncFunctionDef)
else:
FOR_TYPES = (ast.For,)
LOOP_TYPES = (ast.While, ast.For)
FUNCTION_TYPES = (ast.FunctionDef,)
if PY36_PLUS:
ANNASSIGN_TYPES = (ast.AnnAssign,)
else:
ANNASSIGN_TYPES = ()
if PY38_PLUS:
def _is_singleton(node): # type: (ast.AST) -> bool
return (
isinstance(node, ast.Constant) and
isinstance(node.value, (bool, type(Ellipsis), type(None)))
)
elif not PY2:
def _is_singleton(node): # type: (ast.AST) -> bool
return isinstance(node, (ast.NameConstant, ast.Ellipsis))
else:
def _is_singleton(node): # type: (ast.AST) -> bool
return (
isinstance(node, ast.Name) and
node.id in {'True', 'False', 'Ellipsis', 'None'}
)
def _is_tuple_constant(node): # type: (ast.AST) -> bool
return (
isinstance(node, ast.Tuple) and
all(_is_constant(elt) for elt in node.elts)
)
if PY38_PLUS:
def _is_constant(node):
return isinstance(node, ast.Constant) or _is_tuple_constant(node)
else:
_const_tps = (ast.Str, ast.Num)
if not PY2:
_const_tps += (ast.Bytes,)
def _is_constant(node):
return (
isinstance(node, _const_tps) or
_is_singleton(node) or
_is_tuple_constant(node)
)
def _is_const_non_singleton(node): # type: (ast.AST) -> bool
return _is_constant(node) and not _is_singleton(node)
def _is_name_or_attr(node, name): # type: (ast.AST, str) -> bool
return (
(isinstance(node, ast.Name) and node.id == name) or
(isinstance(node, ast.Attribute) and node.attr == name)
)
# https://github.com/python/typed_ast/blob/1.4.0/ast27/Parser/tokenizer.c#L102-L104
TYPE_COMMENT_RE = re.compile(r'^#\s*type:\s*')
# https://github.com/python/typed_ast/blob/1.4.0/ast27/Parser/tokenizer.c#L1408-L1413
ASCII_NON_ALNUM = ''.join([chr(i) for i in range(128) if not chr(i).isalnum()])
TYPE_IGNORE_RE = re.compile(
TYPE_COMMENT_RE.pattern + r'ignore([{}]|$)'.format(ASCII_NON_ALNUM))
# https://github.com/python/typed_ast/blob/1.4.0/ast27/Grammar/Grammar#L147
TYPE_FUNC_RE = re.compile(r'^(\(.*?\))\s*->\s*(.*)$')
MAPPING_KEY_RE = re.compile(r'\(([^()]*)\)')
CONVERSION_FLAG_RE = re.compile('[#0+ -]*')
WIDTH_RE = re.compile(r'(?:\*|\d*)')
PRECISION_RE = re.compile(r'(?:\.(?:\*|\d*))?')
LENGTH_RE = re.compile('[hlL]?')
# https://docs.python.org/3/library/stdtypes.html#old-string-formatting
VALID_CONVERSIONS = frozenset('diouxXeEfFgGcrsa%')
def _must_match(regex, string, pos):
# type: (Pattern[str], str, int) -> Match[str]
match = regex.match(string, pos)
assert match is not None
return match
def parse_percent_format(s): # type: (str) -> Tuple[PercentFormat, ...]
"""Parses the string component of a `'...' % ...` format call
Copied from https://github.com/asottile/pyupgrade at v1.20.1
"""
def _parse_inner():
# type: () -> Generator[PercentFormat, None, None]
string_start = 0
string_end = 0
in_fmt = False
i = 0
while i < len(s):
if not in_fmt:
try:
i = s.index('%', i)
except ValueError: # no more % fields!
yield s[string_start:], None
return
else:
string_end = i
i += 1
in_fmt = True
else:
key_match = MAPPING_KEY_RE.match(s, i)
if key_match:
key = key_match.group(1) # type: Optional[str]
i = key_match.end()
else:
key = None
conversion_flag_match = _must_match(CONVERSION_FLAG_RE, s, i)
conversion_flag = conversion_flag_match.group() or None
i = conversion_flag_match.end()
width_match = _must_match(WIDTH_RE, s, i)
width = width_match.group() or None
i = width_match.end()
precision_match = _must_match(PRECISION_RE, s, i)
precision = precision_match.group() or None
i = precision_match.end()
# length modifier is ignored
i = _must_match(LENGTH_RE, s, i).end()
try:
conversion = s[i]
except IndexError:
raise ValueError('end-of-string while parsing format')
i += 1
fmt = (key, conversion_flag, width, precision, conversion)
yield s[string_start:string_end], fmt
in_fmt = False
string_start = i
if in_fmt:
raise ValueError('end-of-string while parsing format')
return tuple(_parse_inner())
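# Illustration only (not part of the pyflakes API): the parser yields
# (literal, placeholder) pairs, where a placeholder is the tuple
# (key, conversion_flag, width, precision, conversion) or None, e.g.
#
#     parse_percent_format('%(name)s: %d')
#     # -> (('', ('name', None, None, None, 's')),
#     #     (': ', (None, None, None, None, 'd')))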
class _FieldsOrder(dict):
"""Fix order of AST node fields."""
def _get_fields(self, node_class):
# handle iter before target, and generators before element
fields = node_class._fields
if 'iter' in fields:
key_first = 'iter'.find
elif 'generators' in fields:
key_first = 'generators'.find
else:
key_first = 'value'.find
return tuple(sorted(fields, key=key_first, reverse=True))
def __missing__(self, node_class):
self[node_class] = fields = self._get_fields(node_class)
return fields
def counter(items):
"""
    Minimal implementation of collections.Counter, required because
    Python 2.6 does not provide Counter in collections.
"""
results = {}
for item in items:
results[item] = results.get(item, 0) + 1
return results
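# Illustration only: counter(['a', 'b', 'a']) == {'a': 2, 'b': 1}.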
def iter_child_nodes(node, omit=None, _fields_order=_FieldsOrder()):
"""
Yield all direct child nodes of *node*, that is, all fields that
are nodes and all items of fields that are lists of nodes.
:param node: AST node to be iterated upon
:param omit: String or tuple of strings denoting the
attributes of the node to be omitted from
further parsing
:param _fields_order: Order of AST node fields
"""
for name in _fields_order[node.__class__]:
if omit and name in omit:
continue
field = getattr(node, name, None)
if isinstance(field, ast.AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, ast.AST):
yield item
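# Illustration only: for a loop node such as
# ast.parse('for i in xs: pass').body[0], iter_child_nodes yields the `iter`
# field (the Name node for `xs`) before the `target` field (`i`), because
# _FieldsOrder sorts `iter` ahead of the other fields.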
def convert_to_value(item):
if isinstance(item, ast.Str):
return item.s
elif hasattr(ast, 'Bytes') and isinstance(item, ast.Bytes):
return item.s
elif isinstance(item, ast.Tuple):
return tuple(convert_to_value(i) for i in item.elts)
elif isinstance(item, ast.Num):
return item.n
elif isinstance(item, ast.Name):
result = VariableKey(item=item)
constants_lookup = {
'True': True,
'False': False,
'None': None,
}
return constants_lookup.get(
result.name,
result,
)
elif (not PY2) and isinstance(item, ast.NameConstant):
# None, True, False are nameconstants in python3, but names in 2
return item.value
else:
return UnhandledKeyType()
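# Illustration only: on the Python versions this module targets,
# convert_to_value(ast.parse("('a', 1)", mode='eval').body) == ('a', 1),
# while an unrecognized node type falls back to an UnhandledKeyType instance.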
def is_notimplemented_name_node(node):
return isinstance(node, ast.Name) and getNodeName(node) == 'NotImplemented'
class Binding(object):
"""
Represents the binding of a value to a name.
The checker uses this to keep track of which names have been bound and
which names have not. See L{Assignment} for a special type of binding that
is checked with stricter rules.
@ivar used: pair of (L{Scope}, node) indicating the scope and
the node that this binding was last used.
"""
def __init__(self, name, source):
self.name = name
self.source = source
self.used = False
def __str__(self):
return self.name
def __repr__(self):
return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__,
self.name,
self.source.lineno,
id(self))
def redefines(self, other):
return isinstance(other, Definition) and self.name == other.name
class Definition(Binding):
"""
A binding that defines a function or a class.
"""
class Builtin(Definition):
"""A definition created for all Python builtins."""
def __init__(self, name):
super(Builtin, self).__init__(name, None)
def __repr__(self):
return '<%s object %r at 0x%x>' % (self.__class__.__name__,
self.name,
id(self))
class UnhandledKeyType(object):
"""
A dictionary key of a type that we cannot or do not check for duplicates.
"""
class VariableKey(object):
"""
A dictionary key which is a variable.
@ivar item: The variable AST object.
"""
def __init__(self, item):
self.name = item.id
def __eq__(self, compare):
return (
compare.__class__ == self.__class__ and
compare.name == self.name
)
def __hash__(self):
return hash(self.name)
class Importation(Definition):
"""
A binding created by an import statement.
@ivar fullName: The complete name given to the import statement,
possibly including multiple dotted components.
@type fullName: C{str}
"""
def __init__(self, name, source, full_name=None):
self.fullName = full_name or name
self.redefined = []
super(Importation, self).__init__(name, source)
def redefines(self, other):
if isinstance(other, SubmoduleImportation):
# See note in SubmoduleImportation about RedefinedWhileUnused
return self.fullName == other.fullName
return isinstance(other, Definition) and self.name == other.name
def _has_alias(self):
"""Return whether importation needs an as clause."""
return not self.fullName.split('.')[-1] == self.name
@property
def source_statement(self):
"""Generate a source statement equivalent to the import."""
if self._has_alias():
return 'import %s as %s' % (self.fullName, self.name)
else:
return 'import %s' % self.fullName
def __str__(self):
"""Return import full name with alias."""
if self._has_alias():
return self.fullName + ' as ' + self.name
else:
return self.fullName
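# Illustration only: a hypothetical binding Importation('np', source, 'numpy')
# renders as str(...) == 'numpy as np' and
# .source_statement == 'import numpy as np', while Importation('os', source)
# renders as plain 'import os'.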
class SubmoduleImportation(Importation):
"""
A binding created by a submodule import statement.
A submodule import is a special case where the root module is implicitly
imported, without an 'as' clause, and the submodule is also imported.
Python does not restrict which attributes of the root module may be used.
This class is only used when the submodule import is without an 'as' clause.
pyflakes handles this case by registering the root module name in the scope,
allowing any attribute of the root module to be accessed.
RedefinedWhileUnused is suppressed in `redefines` unless the submodule
name is also the same, to avoid false positives.
"""
def __init__(self, name, source):
# A dot should only appear in the name when it is a submodule import
assert '.' in name and (not source or isinstance(source, ast.Import))
package_name = name.split('.')[0]
super(SubmoduleImportation, self).__init__(package_name, source)
self.fullName = name
def redefines(self, other):
if isinstance(other, Importation):
return self.fullName == other.fullName
return super(SubmoduleImportation, self).redefines(other)
def __str__(self):
return self.fullName
@property
def source_statement(self):
return 'import ' + self.fullName
class ImportationFrom(Importation):
def __init__(self, name, source, module, real_name=None):
self.module = module
self.real_name = real_name or name
if module.endswith('.'):
full_name = module + self.real_name
else:
full_name = module + '.' + self.real_name
super(ImportationFrom, self).__init__(name, source, full_name)
def __str__(self):
"""Return import full name with alias."""
if self.real_name != self.name:
return self.fullName + ' as ' + self.name
else:
return self.fullName
@property
def source_statement(self):
if self.real_name != self.name:
return 'from %s import %s as %s' % (self.module,
self.real_name,
self.name)
else:
return 'from %s import %s' % (self.module, self.name)
class StarImportation(Importation):
"""A binding created by a 'from x import *' statement."""
def __init__(self, name, source):
super(StarImportation, self).__init__('*', source)
        # Each star importation needs a unique name, and it may not be the
        # module name, otherwise the module itself would be deemed imported.
self.name = name + '.*'
self.fullName = name
@property
def source_statement(self):
return 'from ' + self.fullName + ' import *'
def __str__(self):
# When the module ends with a ., avoid the ambiguous '..*'
if self.fullName.endswith('.'):
return self.source_statement
else:
return self.name
class FutureImportation(ImportationFrom):
"""
A binding created by a from `__future__` import statement.
`__future__` imports are implicitly used.
"""
def __init__(self, name, source, scope):
super(FutureImportation, self).__init__(name, source, '__future__')
self.used = (scope, source)
class Argument(Binding):
"""
Represents binding a name as an argument.
"""
class Assignment(Binding):
"""
Represents binding a name with an explicit assignment.
The checker will raise warnings for any Assignment that isn't used. Also,
the checker does not consider assignments in tuple/list unpacking to be
Assignments, rather it treats them as simple Bindings.
"""
class Annotation(Binding):
"""
Represents binding a name to a type without an associated value.
As long as this name is not assigned a value in another binding, it is considered
undefined for most purposes. One notable exception is using the name as a type
annotation.
"""
def redefines(self, other):
"""An Annotation doesn't define any name, so it cannot redefine one."""
return False
class FunctionDefinition(Definition):
pass
class ClassDefinition(Definition):
pass
class ExportBinding(Binding):
"""
A binding created by an C{__all__} assignment. If the names in the list
can be determined statically, they will be treated as names for export and
additional checking applied to them.
The only recognized C{__all__} assignment via list/tuple concatenation is in the
following format:
__all__ = ['a'] + ['b'] + ['c']
Names which are imported and not otherwise used but appear in the value of
C{__all__} will not have an unused import warning reported for them.
"""
def __init__(self, name, source, scope):
if '__all__' in scope and isinstance(source, ast.AugAssign):
self.names = list(scope['__all__'].names)
else:
self.names = []
def _add_to_names(container):
for node in container.elts:
if isinstance(node, ast.Str):
self.names.append(node.s)
if isinstance(source.value, (ast.List, ast.Tuple)):
_add_to_names(source.value)
# If concatenating lists or tuples
elif isinstance(source.value, ast.BinOp):
currentValue = source.value
while isinstance(currentValue.right, (ast.List, ast.Tuple)):
left = currentValue.left
right = currentValue.right
_add_to_names(right)
# If more lists are being added
if isinstance(left, ast.BinOp):
currentValue = left
# If just two lists are being added
elif isinstance(left, (ast.List, ast.Tuple)):
_add_to_names(left)
# All lists accounted for - done
break
# If not list concatenation
else:
break
super(ExportBinding, self).__init__(name, source)
class Scope(dict):
importStarred = False # set to True when import * is found
def __repr__(self):
scope_cls = self.__class__.__name__
return '<%s at 0x%x %s>' % (scope_cls, id(self), dict.__repr__(self))
class ClassScope(Scope):
pass
class FunctionScope(Scope):
"""
I represent a name scope for a function.
@ivar globals: Names declared 'global' in this function.
"""
usesLocals = False
alwaysUsed = {'__tracebackhide__', '__traceback_info__',
'__traceback_supplement__'}
def __init__(self):
super(FunctionScope, self).__init__()
# Simplify: manage the special locals as globals
self.globals = self.alwaysUsed.copy()
self.returnValue = None # First non-empty return
self.isGenerator = False # Detect a generator
def unusedAssignments(self):
"""
Return a generator for the assignments which have not been used.
"""
for name, binding in self.items():
if (not binding.used and
name != '_' and # see issue #202
name not in self.globals and
not self.usesLocals and
isinstance(binding, Assignment)):
yield name, binding
class GeneratorScope(Scope):
pass
class ModuleScope(Scope):
"""Scope for a module."""
_futures_allowed = True
_annotations_future_enabled = False
class DoctestScope(ModuleScope):
"""Scope for a doctest."""
class DummyNode(object):
"""Used in place of an `ast.AST` to set error message positions"""
def __init__(self, lineno, col_offset):
self.lineno = lineno
self.col_offset = col_offset
class DetectClassScopedMagic:
names = dir()
# Globally defined names which are not attributes of the builtins module, or
# are only present on some platforms.
_MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError']
# Module-scope annotations are stored in `__annotations__`; see also PEP 526.
if PY36_PLUS:
_MAGIC_GLOBALS.append('__annotations__')
def getNodeName(node):
# Returns node.id, or node.name, or None
if hasattr(node, 'id'): # One of the many nodes with an id
return node.id
if hasattr(node, 'name'): # an ExceptHandler node
return node.name
if hasattr(node, 'rest'): # a MatchMapping node
return node.rest
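# Illustration only: getNodeName(ast.parse('x').body[0].value) == 'x', since
# an ast.Name carries an `id`; nodes with none of `id`, `name` or `rest`
# yield None implicitly.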
TYPING_MODULES = frozenset(('typing', 'typing_extensions'))
def _is_typing_helper(node, is_name_match_fn, scope_stack):
"""
Internal helper to determine whether or not something is a member of a
typing module. This is used as part of working out whether we are within a
type annotation context.
Note: you probably don't want to use this function directly. Instead see the
utils below which wrap it (`_is_typing` and `_is_any_typing_member`).
"""
def _bare_name_is_attr(name):
for scope in reversed(scope_stack):
if name in scope:
return (
isinstance(scope[name], ImportationFrom) and
scope[name].module in TYPING_MODULES and
is_name_match_fn(scope[name].real_name)
)
return False
def _module_scope_is_typing(name):
for scope in reversed(scope_stack):
if name in scope:
return (
isinstance(scope[name], Importation) and
scope[name].fullName in TYPING_MODULES
)
return False
return (
(
isinstance(node, ast.Name) and
_bare_name_is_attr(node.id)
) or (
isinstance(node, ast.Attribute) and
isinstance(node.value, ast.Name) and
_module_scope_is_typing(node.value.id) and
is_name_match_fn(node.attr)
)
)
def _is_typing(node, typing_attr, scope_stack):
"""
Determine whether `node` represents the member of a typing module specified
by `typing_attr`.
This is used as part of working out whether we are within a type annotation
context.
"""
return _is_typing_helper(node, lambda x: x == typing_attr, scope_stack)
def _is_any_typing_member(node, scope_stack):
"""
Determine whether `node` represents any member of a typing module.
This is used as part of working out whether we are within a type annotation
context.
"""
return _is_typing_helper(node, lambda x: True, scope_stack)
def is_typing_overload(value, scope_stack):
return (
isinstance(value.source, FUNCTION_TYPES) and
any(
_is_typing(dec, 'overload', scope_stack)
for dec in value.source.decorator_list
)
)
class AnnotationState:
NONE = 0
STRING = 1
BARE = 2
def in_annotation(func):
@functools.wraps(func)
def in_annotation_func(self, *args, **kwargs):
with self._enter_annotation():
return func(self, *args, **kwargs)
return in_annotation_func
def in_string_annotation(func):
@functools.wraps(func)
def in_annotation_func(self, *args, **kwargs):
with self._enter_annotation(AnnotationState.STRING):
return func(self, *args, **kwargs)
return in_annotation_func
def make_tokens(code):
# PY3: tokenize.tokenize requires readline of bytes
if not isinstance(code, bytes):
code = code.encode('UTF-8')
lines = iter(code.splitlines(True))
# next(lines, b'') is to prevent an error in pypy3
return tuple(tokenize_tokenize(lambda: next(lines, b'')))
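# Illustration only, on Python 3:
#     [tokenize.tok_name[t[0]] for t in make_tokens('x = 1')][:4]
#     # -> ['ENCODING', 'NAME', 'OP', 'NUMBER']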
class _TypeableVisitor(ast.NodeVisitor):
"""Collect the line number and nodes which are deemed typeable by
PEP 484
https://www.python.org/dev/peps/pep-0484/#type-comments
"""
def __init__(self):
self.typeable_lines = [] # type: List[int]
self.typeable_nodes = {} # type: Dict[int, ast.AST]
def _typeable(self, node):
        # if there is more than one typeable thing on a line, the last one wins
self.typeable_lines.append(node.lineno)
self.typeable_nodes[node.lineno] = node
self.generic_visit(node)
visit_Assign = visit_For = visit_FunctionDef = visit_With = _typeable
visit_AsyncFor = visit_AsyncFunctionDef = visit_AsyncWith = _typeable
def _collect_type_comments(tree, tokens):
visitor = _TypeableVisitor()
visitor.visit(tree)
type_comments = collections.defaultdict(list)
for tp, text, start, _, _ in tokens:
if (
tp != tokenize.COMMENT or # skip non comments
not TYPE_COMMENT_RE.match(text) or # skip non-type comments
TYPE_IGNORE_RE.match(text) # skip ignores
):
continue
# search for the typeable node at or before the line number of the
# type comment.
# if the bisection insertion point is before any nodes this is an
# invalid type comment which is ignored.
lineno, _ = start
idx = bisect.bisect_right(visitor.typeable_lines, lineno)
if idx == 0:
continue
node = visitor.typeable_nodes[visitor.typeable_lines[idx - 1]]
type_comments[node].append((start, text))
return type_comments
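# Illustration only: for src = "x = []  # type: List[int]",
# _collect_type_comments(ast.parse(src), make_tokens(src)) maps the Assign
# node on line 1 to [((1, <col>), '# type: List[int]')]; comments matching
# TYPE_IGNORE_RE (e.g. "# type: ignore") are skipped.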
class Checker(object):
"""
I check the cleanliness and sanity of Python code.
    @ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
        of the list are three-tuples: the callable passed to L{deferFunction},
        a copy of the scope stack at the time L{deferFunction} was called,
        and the checker's offset at that time.
@ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
callables which are deferred assignment checks.
"""
_ast_node_scope = {
ast.Module: ModuleScope,
ast.ClassDef: ClassScope,
ast.FunctionDef: FunctionScope,
ast.Lambda: FunctionScope,
ast.ListComp: GeneratorScope,
ast.SetComp: GeneratorScope,
ast.GeneratorExp: GeneratorScope,
ast.DictComp: GeneratorScope,
}
if PY35_PLUS:
_ast_node_scope[ast.AsyncFunctionDef] = FunctionScope
nodeDepth = 0
offset = None
_in_annotation = AnnotationState.NONE
_in_deferred = False
builtIns = set(builtin_vars).union(_MAGIC_GLOBALS)
_customBuiltIns = os.environ.get('PYFLAKES_BUILTINS')
if _customBuiltIns:
builtIns.update(_customBuiltIns.split(','))
del _customBuiltIns
# TODO: file_tokens= is required to perform checks on type comments,
# eventually make this a required positional argument. For now it
# is defaulted to `()` for api compatibility.
def __init__(self, tree, filename='(none)', builtins=None,
withDoctest='PYFLAKES_DOCTEST' in os.environ, file_tokens=()):
self._nodeHandlers = {}
self._deferredFunctions = []
self._deferredAssignments = []
self.deadScopes = []
self.messages = []
self.filename = filename
if builtins:
self.builtIns = self.builtIns.union(builtins)
self.withDoctest = withDoctest
try:
self.scopeStack = [Checker._ast_node_scope[type(tree)]()]
except KeyError:
raise RuntimeError('No scope implemented for the node %r' % tree)
self.exceptHandlers = [()]
self.root = tree
self._type_comments = _collect_type_comments(tree, file_tokens)
for builtin in self.builtIns:
self.addBinding(None, Builtin(builtin))
self.handleChildren(tree)
self._in_deferred = True
self.runDeferred(self._deferredFunctions)
# Set _deferredFunctions to None so that deferFunction will fail
# noisily if called after we've run through the deferred functions.
self._deferredFunctions = None
self.runDeferred(self._deferredAssignments)
# Set _deferredAssignments to None so that deferAssignment will fail
# noisily if called after we've run through the deferred assignments.
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.checkDeadScopes()
def deferFunction(self, callable):
"""
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored, however it will contain any new bindings added to it.
"""
self._deferredFunctions.append((callable, self.scopeStack[:], self.offset))
def deferAssignment(self, callable):
"""
Schedule an assignment handler to be called just after deferred
function handlers.
"""
self._deferredAssignments.append((callable, self.scopeStack[:], self.offset))
def runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope, offset in deferred:
self.scopeStack = scope
self.offset = offset
handler()
def _in_doctest(self):
return (len(self.scopeStack) >= 2 and
isinstance(self.scopeStack[1], DoctestScope))
@property
def futuresAllowed(self):
if not all(isinstance(scope, ModuleScope)
for scope in self.scopeStack):
return False
return self.scope._futures_allowed
@futuresAllowed.setter
def futuresAllowed(self, value):
assert value is False
if isinstance(self.scope, ModuleScope):
self.scope._futures_allowed = False
@property
def annotationsFutureEnabled(self):
scope = self.scopeStack[0]
if not isinstance(scope, ModuleScope):
return False
return scope._annotations_future_enabled
@annotationsFutureEnabled.setter
def annotationsFutureEnabled(self, value):
assert value is True
assert isinstance(self.scope, ModuleScope)
self.scope._annotations_future_enabled = True
@property
def scope(self):
return self.scopeStack[-1]
def popScope(self):
self.deadScopes.append(self.scopeStack.pop())
def checkDeadScopes(self):
"""
Look at scopes which have been fully examined and report names in them
which were imported but unused.
"""
for scope in self.deadScopes:
# imports in classes are public members
if isinstance(scope, ClassScope):
continue
all_binding = scope.get('__all__')
if all_binding and not isinstance(all_binding, ExportBinding):
all_binding = None
if all_binding:
all_names = set(all_binding.names)
undefined = [
name for name in all_binding.names
if name not in scope
]
else:
all_names = undefined = []
if undefined:
if not scope.importStarred and \
os.path.basename(self.filename) != '__init__.py':
# Look for possible mistakes in the export list
for name in undefined:
self.report(messages.UndefinedExport,
scope['__all__'].source, name)
# mark all import '*' as used by the undefined in __all__
if scope.importStarred:
from_list = []
for binding in scope.values():
if isinstance(binding, StarImportation):
binding.used = all_binding
from_list.append(binding.fullName)
# report * usage, with a list of possible sources
from_list = ', '.join(sorted(from_list))
for name in undefined:
self.report(messages.ImportStarUsage,
scope['__all__'].source, name, from_list)
# Look for imported names that aren't used.
for value in scope.values():
if isinstance(value, Importation):
used = value.used or value.name in all_names
if not used:
messg = messages.UnusedImport
self.report(messg, value.source, str(value))
for node in value.redefined:
if isinstance(self.getParent(node), FOR_TYPES):
messg = messages.ImportShadowedByLoopVar
elif used:
continue
else:
messg = messages.RedefinedWhileUnused
self.report(messg, node, value.name, value.source)
def pushScope(self, scopeClass=FunctionScope):
self.scopeStack.append(scopeClass())
def report(self, messageClass, *args, **kwargs):
self.messages.append(messageClass(self.filename, *args, **kwargs))
def getParent(self, node):
# Lookup the first parent which is not Tuple, List or Starred
while True:
node = node._pyflakes_parent
if not hasattr(node, 'elts') and not hasattr(node, 'ctx'):
return node
def getCommonAncestor(self, lnode, rnode, stop):
if (
stop in (lnode, rnode) or
not (
hasattr(lnode, '_pyflakes_parent') and
hasattr(rnode, '_pyflakes_parent')
)
):
return None
if lnode is rnode:
return lnode
if (lnode._pyflakes_depth > rnode._pyflakes_depth):
return self.getCommonAncestor(lnode._pyflakes_parent, rnode, stop)
if (lnode._pyflakes_depth < rnode._pyflakes_depth):
return self.getCommonAncestor(lnode, rnode._pyflakes_parent, stop)
return self.getCommonAncestor(
lnode._pyflakes_parent,
rnode._pyflakes_parent,
stop,
)
def descendantOf(self, node, ancestors, stop):
for a in ancestors:
if self.getCommonAncestor(node, a, stop):
return True
return False
def _getAncestor(self, node, ancestor_type):
parent = node
while True:
if parent is self.root:
return None
parent = self.getParent(parent)
if isinstance(parent, ancestor_type):
return parent
def getScopeNode(self, node):
return self._getAncestor(node, tuple(Checker._ast_node_scope.keys()))
def differentForks(self, lnode, rnode):
"""True, if lnode and rnode are located on different forks of IF/TRY"""
ancestor = self.getCommonAncestor(lnode, rnode, self.root)
parts = getAlternatives(ancestor)
if parts:
for items in parts:
if self.descendantOf(lnode, items, ancestor) ^ \
self.descendantOf(rnode, items, ancestor):
return True
return False
def addBinding(self, node, value):
"""
Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the new value, a Binding instance
"""
# assert value.source in (node, node._pyflakes_parent):
for scope in self.scopeStack[::-1]:
if value.name in scope:
break
existing = scope.get(value.name)
if (existing and not isinstance(existing, Builtin) and
not self.differentForks(node, existing.source)):
parent_stmt = self.getParent(value.source)
if isinstance(existing, Importation) and isinstance(parent_stmt, FOR_TYPES):
self.report(messages.ImportShadowedByLoopVar,
node, value.name, existing.source)
elif scope is self.scope:
if (isinstance(parent_stmt, ast.comprehension) and
not isinstance(self.getParent(existing.source),
(FOR_TYPES, ast.comprehension))):
self.report(messages.RedefinedInListComp,
node, value.name, existing.source)
elif not existing.used and value.redefines(existing):
if value.name != '_' or isinstance(existing, Importation):
if not is_typing_overload(existing, self.scopeStack):
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
elif isinstance(existing, Importation) and value.redefines(existing):
existing.redefined.append(node)
if value.name in self.scope:
# then assume the rebound name is used as a global or within a loop
value.used = self.scope[value.name].used
# don't treat annotations as assignments if there is an existing value
# in scope
if value.name not in self.scope or not isinstance(value, Annotation):
self.scope[value.name] = value
def _unknown_handler(self, node):
# this environment variable configures whether to error on unknown
# ast types.
#
# this is silent by default but the error is enabled for the pyflakes
# testsuite.
#
# this allows new syntax to be added to python without *requiring*
# changes from the pyflakes side. but will still produce an error
# in the pyflakes testsuite (so more specific handling can be added if
# needed).
if os.environ.get('PYFLAKES_ERROR_UNKNOWN'):
raise NotImplementedError('Unexpected type: {}'.format(type(node)))
else:
self.handleChildren(node)
def getNodeHandler(self, node_class):
try:
return self._nodeHandlers[node_class]
except KeyError:
nodeType = getNodeType(node_class)
self._nodeHandlers[node_class] = handler = getattr(
self, nodeType, self._unknown_handler,
)
return handler
def handleNodeLoad(self, node):
name = getNodeName(node)
if not name:
return
in_generators = None
importStarred = None
# try enclosing function scopes and global scope
for scope in self.scopeStack[-1::-1]:
if isinstance(scope, ClassScope):
if not PY2 and name == '__class__':
return
elif in_generators is False:
# only generators used in a class scope can access the
# names of the class. this is skipped during the first
# iteration
continue
binding = scope.get(name, None)
if isinstance(binding, Annotation) and not self._in_postponed_annotation:
continue
if name == 'print' and isinstance(binding, Builtin):
parent = self.getParent(node)
if (isinstance(parent, ast.BinOp) and
isinstance(parent.op, ast.RShift)):
self.report(messages.InvalidPrintSyntax, node)
try:
scope[name].used = (self.scope, node)
                # If the name of a SubmoduleImportation is the same as the
                # alias of another Importation and that alias is used, the
                # SubmoduleImportation should also be marked as used.
n = scope[name]
if isinstance(n, Importation) and n._has_alias():
try:
scope[n.fullName].used = (self.scope, node)
except KeyError:
pass
except KeyError:
pass
else:
return
importStarred = importStarred or scope.importStarred
if in_generators is not False:
in_generators = isinstance(scope, GeneratorScope)
if importStarred:
from_list = []
for scope in self.scopeStack[-1::-1]:
for binding in scope.values():
if isinstance(binding, StarImportation):
# mark '*' imports as used for each scope
binding.used = (self.scope, node)
from_list.append(binding.fullName)
# report * usage, with a list of possible sources
from_list = ', '.join(sorted(from_list))
self.report(messages.ImportStarUsage, node, name, from_list)
return
if name == '__path__' and os.path.basename(self.filename) == '__init__.py':
# the special name __path__ is valid only in packages
return
if name in DetectClassScopedMagic.names and isinstance(self.scope, ClassScope):
return
# protected with a NameError handler?
if 'NameError' not in self.exceptHandlers[-1]:
self.report(messages.UndefinedName, node, name)
def handleNodeStore(self, node):
name = getNodeName(node)
if not name:
return
# if the name hasn't already been defined in the current scope
if isinstance(self.scope, FunctionScope) and name not in self.scope:
# for each function or module scope above us
for scope in self.scopeStack[:-1]:
if not isinstance(scope, (FunctionScope, ModuleScope)):
continue
# if the name was defined in that scope, and the name has
# been accessed already in the current scope, and hasn't
# been declared global
used = name in scope and scope[name].used
if used and used[0] is self.scope and name not in self.scope.globals:
# then it's probably a mistake
self.report(messages.UndefinedLocal,
scope[name].used[1], name, scope[name].source)
break
parent_stmt = self.getParent(node)
if isinstance(parent_stmt, ANNASSIGN_TYPES) and parent_stmt.value is None:
binding = Annotation(name, node)
elif isinstance(parent_stmt, (FOR_TYPES, ast.comprehension)) or (
parent_stmt != node._pyflakes_parent and
not self.isLiteralTupleUnpacking(parent_stmt)):
binding = Binding(name, node)
elif (
name == '__all__' and
isinstance(self.scope, ModuleScope) and
isinstance(
node._pyflakes_parent,
(ast.Assign, ast.AugAssign, ast.AnnAssign)
)
):
binding = ExportBinding(name, node._pyflakes_parent, self.scope)
elif PY2 and isinstance(getattr(node, 'ctx', None), ast.Param):
binding = Argument(name, self.getScopeNode(node))
else:
binding = Assignment(name, node)
self.addBinding(node, binding)
def handleNodeDelete(self, node):
def on_conditional_branch():
"""
Return `True` if node is part of a conditional body.
"""
current = getattr(node, '_pyflakes_parent', None)
while current:
if isinstance(current, (ast.If, ast.While, ast.IfExp)):
return True
current = getattr(current, '_pyflakes_parent', None)
return False
name = getNodeName(node)
if not name:
return
if on_conditional_branch():
# We cannot predict if this conditional branch is going to
# be executed.
return
if isinstance(self.scope, FunctionScope) and name in self.scope.globals:
self.scope.globals.remove(name)
else:
try:
del self.scope[name]
except KeyError:
self.report(messages.UndefinedName, node, name)
@contextlib.contextmanager
def _enter_annotation(self, ann_type=AnnotationState.BARE):
orig, self._in_annotation = self._in_annotation, ann_type
try:
yield
finally:
self._in_annotation = orig
@property
def _in_postponed_annotation(self):
return (
self._in_annotation == AnnotationState.STRING or
self.annotationsFutureEnabled
)
def _handle_type_comments(self, node):
for (lineno, col_offset), comment in self._type_comments.get(node, ()):
comment = comment.split(':', 1)[1].strip()
func_match = TYPE_FUNC_RE.match(comment)
if func_match:
parts = (
func_match.group(1).replace('*', ''),
func_match.group(2).strip(),
)
else:
parts = (comment,)
for part in parts:
if PY2:
part = part.replace('...', 'Ellipsis')
self.deferFunction(functools.partial(
self.handleStringAnnotation,
part, DummyNode(lineno, col_offset), lineno, col_offset,
messages.CommentAnnotationSyntaxError,
))
def handleChildren(self, tree, omit=None):
self._handle_type_comments(tree)
for node in iter_child_nodes(tree, omit=omit):
self.handleNode(node, tree)
def isLiteralTupleUnpacking(self, node):
if isinstance(node, ast.Assign):
for child in node.targets + [node.value]:
if not hasattr(child, 'elts'):
return False
return True
def isDocstring(self, node):
"""
Determine if the given node is a docstring, as long as it is at the
correct place in the node tree.
"""
return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and
isinstance(node.value, ast.Str))
def getDocstring(self, node):
if isinstance(node, ast.Expr):
node = node.value
if not isinstance(node, ast.Str):
return (None, None)
if PYPY or PY38_PLUS:
doctest_lineno = node.lineno - 1
else:
            # Computed incorrectly if the docstring has a backslash
doctest_lineno = node.lineno - node.s.count('\n') - 1
return (node.s, doctest_lineno)
def handleNode(self, node, parent):
if node is None:
return
if self.offset and getattr(node, 'lineno', None) is not None:
node.lineno += self.offset[0]
node.col_offset += self.offset[1]
if self.futuresAllowed and not (isinstance(node, ast.ImportFrom) or
self.isDocstring(node)):
self.futuresAllowed = False
self.nodeDepth += 1
node._pyflakes_depth = self.nodeDepth
node._pyflakes_parent = parent
try:
handler = self.getNodeHandler(node.__class__)
handler(node)
finally:
self.nodeDepth -= 1
_getDoctestExamples = doctest.DocTestParser().get_examples
def handleDoctests(self, node):
try:
if hasattr(node, 'docstring'):
docstring = node.docstring
# This is just a reasonable guess. In Python 3.7, docstrings no
# longer have line numbers associated with them. This will be
# incorrect if there are empty lines between the beginning
# of the function and the docstring.
node_lineno = node.lineno
if hasattr(node, 'args'):
node_lineno = max([node_lineno] +
[arg.lineno for arg in node.args.args])
else:
(docstring, node_lineno) = self.getDocstring(node.body[0])
examples = docstring and self._getDoctestExamples(docstring)
except (ValueError, IndexError):
# e.g. line 6 of the docstring for <string> has inconsistent
# leading whitespace: ...
return
if not examples:
return
# Place doctest in module scope
saved_stack = self.scopeStack
self.scopeStack = [self.scopeStack[0]]
node_offset = self.offset or (0, 0)
self.pushScope(DoctestScope)
if '_' not in self.scopeStack[0]:
self.addBinding(None, Builtin('_'))
for example in examples:
try:
tree = ast.parse(example.source, "<doctest>")
except SyntaxError:
e = sys.exc_info()[1]
if PYPY:
e.offset += 1
position = (node_lineno + example.lineno + e.lineno,
example.indent + 4 + (e.offset or 0))
self.report(messages.DoctestSyntaxError, node, position)
else:
self.offset = (node_offset[0] + node_lineno + example.lineno,
node_offset[1] + example.indent + 4)
self.handleChildren(tree)
self.offset = node_offset
self.popScope()
self.scopeStack = saved_stack
@in_string_annotation
def handleStringAnnotation(self, s, node, ref_lineno, ref_col_offset, err):
try:
tree = ast.parse(s)
except SyntaxError:
self.report(err, node, s)
return
body = tree.body
if len(body) != 1 or not isinstance(body[0], ast.Expr):
self.report(err, node, s)
return
parsed_annotation = tree.body[0].value
for descendant in ast.walk(parsed_annotation):
if (
'lineno' in descendant._attributes and
'col_offset' in descendant._attributes
):
descendant.lineno = ref_lineno
descendant.col_offset = ref_col_offset
self.handleNode(parsed_annotation, node)
@in_annotation
def handleAnnotation(self, annotation, node):
if isinstance(annotation, ast.Str):
# Defer handling forward annotation.
self.deferFunction(functools.partial(
self.handleStringAnnotation,
annotation.s,
node,
annotation.lineno,
annotation.col_offset,
messages.ForwardAnnotationSyntaxError,
))
elif self.annotationsFutureEnabled:
fn = in_annotation(Checker.handleNode)
self.deferFunction(lambda: fn(self, annotation, node))
else:
self.handleNode(annotation, node)
def ignore(self, node):
pass
# "stmt" type nodes
DELETE = PRINT = FOR = ASYNCFOR = WHILE = WITH = WITHITEM = \
ASYNCWITH = ASYNCWITHITEM = TRYFINALLY = EXEC = \
EXPR = ASSIGN = handleChildren
PASS = ignore
# "expr" type nodes
BOOLOP = UNARYOP = SET = \
REPR = ATTRIBUTE = \
STARRED = NAMECONSTANT = NAMEDEXPR = handleChildren
def SUBSCRIPT(self, node):
if _is_name_or_attr(node.value, 'Literal'):
with self._enter_annotation(AnnotationState.NONE):
self.handleChildren(node)
elif _is_name_or_attr(node.value, 'Annotated'):
self.handleNode(node.value, node)
# py39+
if isinstance(node.slice, ast.Tuple):
slice_tuple = node.slice
# <py39
elif (
isinstance(node.slice, ast.Index) and
isinstance(node.slice.value, ast.Tuple)
):
slice_tuple = node.slice.value
else:
slice_tuple = None
# not a multi-arg `Annotated`
if slice_tuple is None or len(slice_tuple.elts) < 2:
self.handleNode(node.slice, node)
else:
# the first argument is the type
self.handleNode(slice_tuple.elts[0], node)
# the rest of the arguments are not
with self._enter_annotation(AnnotationState.NONE):
for arg in slice_tuple.elts[1:]:
self.handleNode(arg, node)
self.handleNode(node.ctx, node)
else:
if _is_any_typing_member(node.value, self.scopeStack):
with self._enter_annotation():
self.handleChildren(node)
else:
self.handleChildren(node)
def _handle_string_dot_format(self, node):
try:
placeholders = tuple(parse_format_string(node.func.value.s))
except ValueError as e:
self.report(messages.StringDotFormatInvalidFormat, node, e)
return
class state: # py2-compatible `nonlocal`
auto = None
next_auto = 0
placeholder_positional = set()
placeholder_named = set()
def _add_key(fmtkey):
"""Returns True if there is an error which should early-exit"""
if fmtkey is None: # end of string or `{` / `}` escapes
return False
# attributes / indices are allowed in `.format(...)`
fmtkey, _, _ = fmtkey.partition('.')
fmtkey, _, _ = fmtkey.partition('[')
try:
fmtkey = int(fmtkey)
except ValueError:
pass
else: # fmtkey was an integer
if state.auto is True:
self.report(messages.StringDotFormatMixingAutomatic, node)
return True
else:
state.auto = False
if fmtkey == '':
if state.auto is False:
self.report(messages.StringDotFormatMixingAutomatic, node)
return True
else:
state.auto = True
fmtkey = state.next_auto
state.next_auto += 1
if isinstance(fmtkey, int):
placeholder_positional.add(fmtkey)
else:
placeholder_named.add(fmtkey)
return False
for _, fmtkey, spec, _ in placeholders:
if _add_key(fmtkey):
return
# spec can also contain format specifiers
if spec is not None:
try:
spec_placeholders = tuple(parse_format_string(spec))
except ValueError as e:
self.report(messages.StringDotFormatInvalidFormat, node, e)
return
for _, spec_fmtkey, spec_spec, _ in spec_placeholders:
# can't recurse again
if spec_spec is not None and '{' in spec_spec:
self.report(
messages.StringDotFormatInvalidFormat,
node,
'Max string recursion exceeded',
)
return
if _add_key(spec_fmtkey):
return
# bail early if there is *args or **kwargs
if (
# python 2.x *args / **kwargs
getattr(node, 'starargs', None) or
getattr(node, 'kwargs', None) or
# python 3.x *args
any(
isinstance(arg, getattr(ast, 'Starred', ()))
for arg in node.args
) or
# python 3.x **kwargs
any(kwd.arg is None for kwd in node.keywords)
):
return
substitution_positional = set(range(len(node.args)))
substitution_named = {kwd.arg for kwd in node.keywords}
extra_positional = substitution_positional - placeholder_positional
extra_named = substitution_named - placeholder_named
missing_arguments = (
(placeholder_positional | placeholder_named) -
(substitution_positional | substitution_named)
)
if extra_positional:
self.report(
messages.StringDotFormatExtraPositionalArguments,
node,
', '.join(sorted(str(x) for x in extra_positional)),
)
if extra_named:
self.report(
messages.StringDotFormatExtraNamedArguments,
node,
', '.join(sorted(extra_named)),
)
if missing_arguments:
self.report(
messages.StringDotFormatMissingArgument,
node,
', '.join(sorted(str(x) for x in missing_arguments)),
)
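    # Illustrative examples (added, not in the original source) of code this
    # method is designed to flag, assuming the message names used above:
    #   '{} and {}'.format('a')        -> StringDotFormatMissingArgument ('1')
    #   '{0} and {}'.format('a', 'b')  -> StringDotFormatMixingAutomatic
    #   '{x}'.format(y=1)              -> StringDotFormatExtraNamedArguments ('y')
    #                                     and StringDotFormatMissingArgument ('x')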
def CALL(self, node):
if (
isinstance(node.func, ast.Attribute) and
isinstance(node.func.value, ast.Str) and
node.func.attr == 'format'
):
self._handle_string_dot_format(node)
omit = []
annotated = []
not_annotated = []
if (
_is_typing(node.func, 'cast', self.scopeStack) and
len(node.args) >= 1
):
with self._enter_annotation():
self.handleNode(node.args[0], node)
elif _is_typing(node.func, 'TypeVar', self.scopeStack):
# TypeVar("T", "int", "str")
omit += ["args"]
annotated += [arg for arg in node.args[1:]]
# TypeVar("T", bound="str")
omit += ["keywords"]
annotated += [k.value for k in node.keywords if k.arg == "bound"]
not_annotated += [
(k, ["value"] if k.arg == "bound" else None)
for k in node.keywords
]
elif _is_typing(node.func, "TypedDict", self.scopeStack):
# TypedDict("a", {"a": int})
if len(node.args) > 1 and isinstance(node.args[1], ast.Dict):
omit += ["args"]
annotated += node.args[1].values
not_annotated += [
(arg, ["values"] if i == 1 else None)
for i, arg in enumerate(node.args)
]
# TypedDict("a", a=int)
omit += ["keywords"]
annotated += [k.value for k in node.keywords]
not_annotated += [(k, ["value"]) for k in node.keywords]
elif _is_typing(node.func, "NamedTuple", self.scopeStack):
# NamedTuple("a", [("a", int)])
if (
len(node.args) > 1 and
isinstance(node.args[1], (ast.Tuple, ast.List)) and
all(isinstance(x, (ast.Tuple, ast.List)) and
len(x.elts) == 2 for x in node.args[1].elts)
):
omit += ["args"]
annotated += [elt.elts[1] for elt in node.args[1].elts]
not_annotated += [(elt.elts[0], None) for elt in node.args[1].elts]
not_annotated += [
(arg, ["elts"] if i == 1 else None)
for i, arg in enumerate(node.args)
]
not_annotated += [(elt, "elts") for elt in node.args[1].elts]
# NamedTuple("a", a=int)
omit += ["keywords"]
annotated += [k.value for k in node.keywords]
not_annotated += [(k, ["value"]) for k in node.keywords]
if omit:
with self._enter_annotation(AnnotationState.NONE):
for na_node, na_omit in not_annotated:
self.handleChildren(na_node, omit=na_omit)
self.handleChildren(node, omit=omit)
with self._enter_annotation():
for annotated_node in annotated:
self.handleNode(annotated_node, node)
else:
self.handleChildren(node)
def _handle_percent_format(self, node):
try:
placeholders = parse_percent_format(node.left.s)
except ValueError:
self.report(
messages.PercentFormatInvalidFormat,
node,
'incomplete format',
)
return
named = set()
positional_count = 0
positional = None
for _, placeholder in placeholders:
if placeholder is None:
continue
name, _, width, precision, conversion = placeholder
if conversion == '%':
continue
if conversion not in VALID_CONVERSIONS:
self.report(
messages.PercentFormatUnsupportedFormatCharacter,
node,
conversion,
)
if positional is None and conversion:
positional = name is None
for part in (width, precision):
if part is not None and '*' in part:
if not positional:
self.report(
messages.PercentFormatStarRequiresSequence,
node,
)
else:
positional_count += 1
if positional and name is not None:
self.report(
messages.PercentFormatMixedPositionalAndNamed,
node,
)
return
elif not positional and name is None:
self.report(
messages.PercentFormatMixedPositionalAndNamed,
node,
)
return
if positional:
positional_count += 1
else:
named.add(name)
if (
isinstance(node.right, (ast.List, ast.Tuple)) and
# does not have any *splats (py35+ feature)
not any(
isinstance(elt, getattr(ast, 'Starred', ()))
for elt in node.right.elts
)
):
substitution_count = len(node.right.elts)
if positional and positional_count != substitution_count:
self.report(
messages.PercentFormatPositionalCountMismatch,
node,
positional_count,
substitution_count,
)
elif not positional:
self.report(messages.PercentFormatExpectedMapping, node)
if (
isinstance(node.right, ast.Dict) and
all(isinstance(k, ast.Str) for k in node.right.keys)
):
if positional and positional_count > 1:
self.report(messages.PercentFormatExpectedSequence, node)
return
substitution_keys = {k.s for k in node.right.keys}
extra_keys = substitution_keys - named
missing_keys = named - substitution_keys
if not positional and extra_keys:
self.report(
messages.PercentFormatExtraNamedArguments,
node,
', '.join(sorted(extra_keys)),
)
if not positional and missing_keys:
self.report(
messages.PercentFormatMissingArgument,
node,
', '.join(sorted(missing_keys)),
)
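    # Illustrative examples (added, not in the original source) of code this
    # method is designed to flag:
    #   '%s %s' % ('a',)     -> PercentFormatPositionalCountMismatch
    #   '%(x)s' % {'y': 1}   -> PercentFormatExtraNamedArguments ('y')
    #                           and PercentFormatMissingArgument ('x')
    #   '%s %(x)s' % ...     -> PercentFormatMixedPositionalAndNamed
    #   '%z' % (1,)          -> PercentFormatUnsupportedFormatCharacter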
def BINOP(self, node):
if (
isinstance(node.op, ast.Mod) and
isinstance(node.left, ast.Str)
):
self._handle_percent_format(node)
self.handleChildren(node)
def STR(self, node):
if self._in_annotation:
fn = functools.partial(
self.handleStringAnnotation,
node.s,
node,
node.lineno,
node.col_offset,
messages.ForwardAnnotationSyntaxError,
)
if self._in_deferred:
fn()
else:
self.deferFunction(fn)
if PY38_PLUS:
def CONSTANT(self, node):
if isinstance(node.value, str):
return self.STR(node)
else:
NUM = BYTES = ELLIPSIS = CONSTANT = ignore
# "slice" type nodes
SLICE = EXTSLICE = INDEX = handleChildren
# expression contexts are node instances too, though being constants
LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore
# same for operators
AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \
BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \
EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = \
MATMULT = ignore
def RAISE(self, node):
self.handleChildren(node)
arg = get_raise_argument(node)
if isinstance(arg, ast.Call):
if is_notimplemented_name_node(arg.func):
# Handle "raise NotImplemented(...)"
self.report(messages.RaiseNotImplemented, node)
elif is_notimplemented_name_node(arg):
# Handle "raise NotImplemented"
self.report(messages.RaiseNotImplemented, node)
# additional node types
COMPREHENSION = KEYWORD = FORMATTEDVALUE = handleChildren
_in_fstring = False
def JOINEDSTR(self, node):
if (
# the conversion / etc. flags are parsed as f-strings without
# placeholders
not self._in_fstring and
not any(isinstance(x, ast.FormattedValue) for x in node.values)
):
self.report(messages.FStringMissingPlaceholders, node)
self._in_fstring, orig = True, self._in_fstring
try:
self.handleChildren(node)
finally:
self._in_fstring = orig
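    # Illustrative examples (added) of what JOINEDSTR flags, assuming the
    # message name above:
    #   f"no placeholders"   -> FStringMissingPlaceholders
    #   f"hello {name}"      -> ok (contains a FormattedValue)
    # Nested parts (conversion / format-spec) set _in_fstring so they are
    # not reported a second time.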
def DICT(self, node):
# Complain if there are duplicate keys with different values
# If they have the same value it's not going to cause potentially
# unexpected behaviour so we'll not complain.
keys = [
convert_to_value(key) for key in node.keys
]
key_counts = counter(keys)
duplicate_keys = [
key for key, count in key_counts.items()
if count > 1
]
for key in duplicate_keys:
key_indices = [i for i, i_key in enumerate(keys) if i_key == key]
values = counter(
convert_to_value(node.values[index])
for index in key_indices
)
if any(count == 1 for value, count in values.items()):
for key_index in key_indices:
key_node = node.keys[key_index]
if isinstance(key, VariableKey):
self.report(messages.MultiValueRepeatedKeyVariable,
key_node,
key.name)
else:
self.report(
messages.MultiValueRepeatedKeyLiteral,
key_node,
key,
)
self.handleChildren(node)
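    # Illustrative examples (added) of the duplicate-key rule implemented
    # above:
    #   {1: 'a', 1: 'b'}   -> MultiValueRepeatedKeyLiteral (values differ)
    #   {1: 'a', 1: 'a'}   -> not reported (same value, behaviour unchanged)
    #   {x: 1, x: 2}       -> MultiValueRepeatedKeyVariable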
def IF(self, node):
if isinstance(node.test, ast.Tuple) and node.test.elts != []:
self.report(messages.IfTuple, node)
self.handleChildren(node)
IFEXP = IF
def ASSERT(self, node):
if isinstance(node.test, ast.Tuple) and node.test.elts != []:
self.report(messages.AssertTuple, node)
self.handleChildren(node)
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
global_scope_index = 1 if self._in_doctest() else 0
global_scope = self.scopeStack[global_scope_index]
# Ignore 'global' statement in global scope.
if self.scope is not global_scope:
# One 'global' statement can bind multiple (comma-delimited) names.
for node_name in node.names:
node_value = Assignment(node_name, node)
# Remove UndefinedName messages already reported for this name.
# TODO: if the global is not used in this scope, it does not
# become a globally defined name. See test_unused_global.
self.messages = [
m for m in self.messages if not
isinstance(m, messages.UndefinedName) or
m.message_args[0] != node_name]
# Bind name to global scope if it doesn't exist already.
global_scope.setdefault(node_name, node_value)
# Bind name to non-global scopes, but as already "used".
node_value.used = (global_scope, node)
for scope in self.scopeStack[global_scope_index + 1:]:
scope[node_name] = node_value
NONLOCAL = GLOBAL
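    # Illustrative sketch (added) of the binding behaviour above:
    #   def f():
    #       global counter   # binds 'counter' in module scope, pre-marked
    #       counter = 1      # as used so it does not look undefined here
    # A 'global' statement at module scope itself is ignored.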
def GENERATOREXP(self, node):
self.pushScope(GeneratorScope)
self.handleChildren(node)
self.popScope()
LISTCOMP = handleChildren if PY2 else GENERATOREXP
DICTCOMP = SETCOMP = GENERATOREXP
def NAME(self, node):
"""
Handle occurrence of Name (which can be a load/store/delete access.)
"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, ast.Load):
self.handleNodeLoad(node)
if (node.id == 'locals' and isinstance(self.scope, FunctionScope) and
isinstance(node._pyflakes_parent, ast.Call)):
# we are doing locals() call in current scope
self.scope.usesLocals = True
elif isinstance(node.ctx, ast.Store):
self.handleNodeStore(node)
elif PY2 and isinstance(node.ctx, ast.Param):
self.handleNodeStore(node)
elif isinstance(node.ctx, ast.Del):
self.handleNodeDelete(node)
else:
# Unknown context
raise RuntimeError("Got impossible expression context: %r" % (node.ctx,))
def CONTINUE(self, node):
        # Walk the tree up until we see a loop (OK), a function or class
        # definition (not OK), a finally block (not OK for 'continue'), or
        # the top module scope (not OK)
n = node
while hasattr(n, '_pyflakes_parent'):
n, n_child = n._pyflakes_parent, n
if isinstance(n, LOOP_TYPES):
# Doesn't apply unless it's in the loop itself
if n_child not in n.orelse:
return
if isinstance(n, (ast.FunctionDef, ast.ClassDef)):
break
# Handle Try/TryFinally difference in Python < and >= 3.3
if hasattr(n, 'finalbody') and isinstance(node, ast.Continue):
if n_child in n.finalbody and not PY38_PLUS:
self.report(messages.ContinueInFinally, node)
return
if isinstance(node, ast.Continue):
self.report(messages.ContinueOutsideLoop, node)
else: # ast.Break
self.report(messages.BreakOutsideLoop, node)
BREAK = CONTINUE
def RETURN(self, node):
if isinstance(self.scope, (ClassScope, ModuleScope)):
self.report(messages.ReturnOutsideFunction, node)
return
if (
node.value and
hasattr(self.scope, 'returnValue') and
not self.scope.returnValue
):
self.scope.returnValue = node.value
self.handleNode(node.value, node)
def YIELD(self, node):
if isinstance(self.scope, (ClassScope, ModuleScope)):
self.report(messages.YieldOutsideFunction, node)
return
self.scope.isGenerator = True
self.handleNode(node.value, node)
AWAIT = YIELDFROM = YIELD
def FUNCTIONDEF(self, node):
for deco in node.decorator_list:
self.handleNode(deco, node)
self.LAMBDA(node)
self.addBinding(node, FunctionDefinition(node.name, node))
# doctest does not process doctest within a doctest,
# or in nested functions.
if (self.withDoctest and
not self._in_doctest() and
not isinstance(self.scope, FunctionScope)):
self.deferFunction(lambda: self.handleDoctests(node))
ASYNCFUNCTIONDEF = FUNCTIONDEF
def LAMBDA(self, node):
args = []
annotations = []
if PY2:
def addArgs(arglist):
for arg in arglist:
if isinstance(arg, ast.Tuple):
addArgs(arg.elts)
else:
args.append(arg.id)
addArgs(node.args.args)
defaults = node.args.defaults
else:
if PY38_PLUS:
for arg in node.args.posonlyargs:
args.append(arg.arg)
annotations.append(arg.annotation)
for arg in node.args.args + node.args.kwonlyargs:
args.append(arg.arg)
annotations.append(arg.annotation)
defaults = node.args.defaults + node.args.kw_defaults
# Only for Python3 FunctionDefs
is_py3_func = hasattr(node, 'returns')
for arg_name in ('vararg', 'kwarg'):
wildcard = getattr(node.args, arg_name)
if not wildcard:
continue
args.append(wildcard if PY2 else wildcard.arg)
if is_py3_func:
if PY2: # Python 2.7
argannotation = arg_name + 'annotation'
annotations.append(getattr(node.args, argannotation))
else: # Python >= 3.4
annotations.append(wildcard.annotation)
if is_py3_func:
annotations.append(node.returns)
if len(set(args)) < len(args):
for (idx, arg) in enumerate(args):
if arg in args[:idx]:
self.report(messages.DuplicateArgument, node, arg)
for annotation in annotations:
self.handleAnnotation(annotation, node)
for default in defaults:
self.handleNode(default, node)
def runFunction():
self.pushScope()
self.handleChildren(node, omit=['decorator_list', 'returns'])
def checkUnusedAssignments():
"""
Check to see if any assignments have not been used.
"""
for name, binding in self.scope.unusedAssignments():
self.report(messages.UnusedVariable, binding.source, name)
self.deferAssignment(checkUnusedAssignments)
if PY2:
def checkReturnWithArgumentInsideGenerator():
"""
Check to see if there is any return statement with
arguments but the function is a generator.
"""
if self.scope.isGenerator and self.scope.returnValue:
self.report(messages.ReturnWithArgsInsideGenerator,
self.scope.returnValue)
self.deferAssignment(checkReturnWithArgumentInsideGenerator)
self.popScope()
self.deferFunction(runFunction)
def ARGUMENTS(self, node):
self.handleChildren(node, omit=('defaults', 'kw_defaults'))
if PY2:
scope_node = self.getScopeNode(node)
if node.vararg:
self.addBinding(node, Argument(node.vararg, scope_node))
if node.kwarg:
self.addBinding(node, Argument(node.kwarg, scope_node))
def ARG(self, node):
self.addBinding(node, Argument(node.arg, self.getScopeNode(node)))
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
for deco in node.decorator_list:
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.handleNode(keywordNode, node)
self.pushScope(ClassScope)
# doctest does not process doctest within a doctest
# classes within classes are processed.
if (self.withDoctest and
not self._in_doctest() and
not isinstance(self.scope, FunctionScope)):
self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node))
def AUGASSIGN(self, node):
self.handleNodeLoad(node.target)
self.handleNode(node.value, node)
self.handleNode(node.target, node)
def TUPLE(self, node):
if not PY2 and isinstance(node.ctx, ast.Store):
# Python 3 advanced tuple unpacking: a, *b, c = d.
# Only one starred expression is allowed, and no more than 1<<8
            # assignments are allowed before a starred expression. There is
# also a limit of 1<<24 expressions after the starred expression,
# which is impossible to test due to memory restrictions, but we
# add it here anyway
has_starred = False
star_loc = -1
for i, n in enumerate(node.elts):
if isinstance(n, ast.Starred):
if has_starred:
self.report(messages.TwoStarredExpressions, node)
# The SyntaxError doesn't distinguish two from more
# than two.
break
has_starred = True
star_loc = i
if star_loc >= 1 << 8 or len(node.elts) - star_loc - 1 >= 1 << 24:
self.report(messages.TooManyExpressionsInStarredAssignment, node)
self.handleChildren(node)
LIST = TUPLE
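    # Illustrative examples (added) for the starred-assignment checks above:
    #   a, *b, c = d     # ok: a single starred name
    #   a, *b, *c = d    # TwoStarredExpressions
    # More than 1 << 8 names before the star (or 1 << 24 after it) yields
    # TooManyExpressionsInStarredAssignment.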
def IMPORT(self, node):
for alias in node.names:
if '.' in alias.name and not alias.asname:
importation = SubmoduleImportation(alias.name, node)
else:
name = alias.asname or alias.name
importation = Importation(name, node, alias.name)
self.addBinding(node, importation)
def IMPORTFROM(self, node):
if node.module == '__future__':
if not self.futuresAllowed:
self.report(messages.LateFutureImport,
node, [n.name for n in node.names])
else:
self.futuresAllowed = False
module = ('.' * node.level) + (node.module or '')
for alias in node.names:
name = alias.asname or alias.name
if node.module == '__future__':
importation = FutureImportation(name, node, self.scope)
if alias.name not in __future__.all_feature_names:
self.report(messages.FutureFeatureNotDefined,
node, alias.name)
if alias.name == 'annotations':
self.annotationsFutureEnabled = True
elif alias.name == '*':
# Only Python 2, local import * is a SyntaxWarning
if not PY2 and not isinstance(self.scope, ModuleScope):
self.report(messages.ImportStarNotPermitted,
node, module)
continue
self.scope.importStarred = True
self.report(messages.ImportStarUsed, node, module)
importation = StarImportation(module, node)
else:
importation = ImportationFrom(name, node,
module, alias.name)
self.addBinding(node, importation)
def TRY(self, node):
handler_names = []
# List the exception handlers
for i, handler in enumerate(node.handlers):
if isinstance(handler.type, ast.Tuple):
for exc_type in handler.type.elts:
handler_names.append(getNodeName(exc_type))
elif handler.type:
handler_names.append(getNodeName(handler.type))
if handler.type is None and i < len(node.handlers) - 1:
self.report(messages.DefaultExceptNotLast, handler)
# Memorize the except handlers and process the body
self.exceptHandlers.append(handler_names)
for child in node.body:
self.handleNode(child, node)
self.exceptHandlers.pop()
# Process the other nodes: "except:", "else:", "finally:"
self.handleChildren(node, omit='body')
TRYEXCEPT = TRY
def EXCEPTHANDLER(self, node):
if PY2 or node.name is None:
self.handleChildren(node)
return
# If the name already exists in the scope, modify state of existing
# binding.
if node.name in self.scope:
self.handleNodeStore(node)
# 3.x: the name of the exception, which is not a Name node, but a
# simple string, creates a local that is only bound within the scope of
# the except: block. As such, temporarily remove the existing binding
# to more accurately determine if the name is used in the except:
# block.
try:
prev_definition = self.scope.pop(node.name)
except KeyError:
prev_definition = None
self.handleNodeStore(node)
self.handleChildren(node)
# See discussion on https://github.com/PyCQA/pyflakes/pull/59
# We're removing the local name since it's being unbound after leaving
# the except: block and it's always unbound if the except: block is
# never entered. This will cause an "undefined name" error raised if
# the checked code tries to use the name afterwards.
#
# Unless it's been removed already. Then do nothing.
try:
binding = self.scope.pop(node.name)
except KeyError:
pass
else:
if not binding.used:
self.report(messages.UnusedVariable, node, node.name)
# Restore.
if prev_definition:
self.scope[node.name] = prev_definition
def ANNASSIGN(self, node):
self.handleNode(node.target, node)
self.handleAnnotation(node.annotation, node)
if node.value:
# If the assignment has value, handle the *value* now.
self.handleNode(node.value, node)
def COMPARE(self, node):
left = node.left
for op, right in zip(node.ops, node.comparators):
if (
isinstance(op, (ast.Is, ast.IsNot)) and (
_is_const_non_singleton(left) or
_is_const_non_singleton(right)
)
):
self.report(messages.IsLiteral, node)
left = right
self.handleChildren(node)
MATCH = MATCH_CASE = MATCHCLASS = MATCHOR = MATCHSEQUENCE = handleChildren
MATCHSINGLETON = MATCHVALUE = handleChildren
def _match_target(self, node):
self.handleNodeStore(node)
self.handleChildren(node)
MATCHAS = MATCHMAPPING = MATCHSTAR = _match_target
|
PyCQA/pyflakes
|
pyflakes/checker.py
|
Python
|
mit
| 84,839
|
[
"VisIt"
] |
eb09eac9cd06878b7b7c14feca87ec6b10f6888e8e6ea1d2b5ea7cb0e8bec217
|
#! /usr/bin/python
# Python 2.7.5, requires Biopython.
'''
Created on 07/06/2014
@author: Adam_Taranto
'''
import argparse;
from Bio import SeqIO;
from Bio.Seq import Seq;
#from Bio.Seq import MutableSeq;
from Bio.SeqRecord import SeqRecord;
#from Bio.Alphabet import IUPAC;
def main(filename, headerRow, decimalPlaces):
# Set variables
outFunc = writeTab
headFunc = writeTab
formatString = "{0:." + str(decimalPlaces) + "f}"
# Do the work
with open(filename, "rU") as handle:
if headerRow:
headFunc(("SeqID",
"Seq_len",
"GC_CONTENT",
"N_Count",
"CpG_OBS",
"CpG_EXP",
"CpG_OE",
"CpHpG_OBS",
"CpHpG_EXP",
"CpHpG_OE",
"CpHpH_OBS",
"CpHpH_EXP",
"CpHpH_OE"
))
# For each sequence
for record in SeqIO.parse(handle, "fasta"):
# Remove lowercase characters
record = record.upper()
# Convert mRNA to DNA
if "U" in record.seq :
toDNA = record.seq.back_transcribe()
record = SeqRecord(toDNA, id=record.id, name=record.name)
# Initialise counters!
baseTotals = {'A':0.0, 'T':0.0, 'C':0.0, 'G':0.0,}
pairTotals = {'AA':0.0,'AT':0.0,'AC':0.0,'AG':0.0,
'TA':0.0,'TT':0.0,'TC':0.0,'TG':0.0,
'CA':0.0,'CT':0.0,'CC':0.0,'CG':0.0,
'GA':0.0,'GT':0.0,'GC':0.0,'GG':0.0,}
tripletTotals = {'AAA':0.0,'AAC':0.0,'AAG':0.0,'AAT':0.0,
'ACA':0.0,'ACC':0.0,'ACG':0.0,'ACT':0.0,
'AGA':0.0,'AGC':0.0,'AGG':0.0,'AGT':0.0,
'ATA':0.0,'ATC':0.0,'ATG':0.0,'ATT':0.0,
'CAA':0.0,'CAC':0.0,'CAG':0.0,'CAT':0.0,
'CCA':0.0,'CCC':0.0,'CCG':0.0,'CCT':0.0,
'CGA':0.0,'CGC':0.0,'CGG':0.0,'CGT':0.0,
'CTA':0.0,'CTC':0.0,'CTG':0.0,'CTT':0.0,
'GAA':0.0,'GAC':0.0,'GAG':0.0,'GAT':0.0,
'GCA':0.0,'GCC':0.0,'GCG':0.0,'GCT':0.0,
'GGA':0.0,'GGC':0.0,'GGG':0.0,'GGT':0.0,
'GTA':0.0,'GTC':0.0,'GTG':0.0,'GTT':0.0,
'TAA':0.0,'TAC':0.0,'TAG':0.0,'TAT':0.0,
'TCA':0.0,'TCC':0.0,'TCG':0.0,'TCT':0.0,
'TGA':0.0,'TGC':0.0,'TGG':0.0,'TGT':0.0,
'TTA':0.0,'TTC':0.0,'TTG':0.0,'TTT':0.0,}
Sec2lastbase = 'N'
lastbase = 'N'
baseidx = 0
seqlen = len(record.seq)
# For each base in sequence
for base in record.seq:
# Sum the new triplet
if base != 'N':
# If current base is not N, then count current base
baseTotals[base] += 1
if lastbase != 'N':
#If lastbase was also not an N then count the current pair
pairTotals[lastbase+base] += 1
if Sec2lastbase != 'N':
#If no Ns in triplet count current triplet
tripletTotals[Sec2lastbase+lastbase+base] += 1
# End of gene?
if baseidx == (seqlen - 1):
# Calculate stats
gcContent = None
CpG_OBS = None
CpG_EXP = None
CpG_OE = None
CpHpG_OBS = None
CpHpG_EXP = None
CpHpG_OE = None
CpHpH_OBS = None
CpHpH_EXP = None
CpHpH_OE = None
# Ncount
nCount = len(record.seq) - sum(baseTotals.values())
if seqlen > 0:
probA = baseTotals['A'] / seqlen
probT = baseTotals['T'] / seqlen
probG = baseTotals['G'] / seqlen
probC = baseTotals['C'] / seqlen
probH = (1 - probG)
# Only need to validate that you are not going to divide by zero.
# CpG_OBS
if seqlen > 0:
CpG_OBS_Num = pairTotals['CG']
CpG_OBS = formatString.format(CpG_OBS_Num)
# CpG_EXP
if seqlen > 0:
CpG_EXP_Num = (baseTotals['C'] * baseTotals['G']) / seqlen
CpG_EXP = formatString.format(CpG_EXP_Num)
# CpG_OE
if CpG_EXP_Num != 0:
CpG_OE_Num = CpG_OBS_Num / CpG_EXP_Num
CpG_OE = formatString.format(CpG_OE_Num)
## CpHpG
if seqlen > 0:
CpHpG_OBS_Num = tripletTotals['CAG'] + tripletTotals['CCG'] + tripletTotals['CTG']
##CpHpG_EXP_Num_NEG = tripletTotals['CTG'] + tripletTotals['CGG'] + tripletTotals['CAG']
CpHpG_OBS = formatString.format(CpHpG_OBS_Num)
# CpHpG_EXP
if seqlen > 0:
CpHpG_EXP_Num = (probC * probH * probG) * seqlen
CpHpG_EXP = formatString.format(CpHpG_EXP_Num)
# CpHpG_OE
if CpHpG_EXP_Num != 0:
CpHpG_OE_Num = CpHpG_OBS_Num / CpHpG_EXP_Num
CpHpG_OE = formatString.format(CpHpG_OE_Num)
## CpHpH
if seqlen > 0:
CpHpH_OBS_Num = (tripletTotals['CAA'] + tripletTotals['CCA'] + tripletTotals['CTA'] +
tripletTotals['CAT'] + tripletTotals['CCT'] + tripletTotals['CTT'] +
tripletTotals['CAC'] + tripletTotals['CCC'] + tripletTotals['CTC'])
##CpHpH_OBS_Num_NEG = (tripletTotals['GAG'] + tripletTotals['GGG'] + tripletTotals['GTG'] +
## tripletTotals['AAG'] + tripletTotals['AGG'] + tripletTotals['ATG'] +
## tripletTotals['TAG'] + tripletTotals['TGG'] + tripletTotals['TTG'])
CpHpH_OBS = formatString.format(CpHpH_OBS_Num)
# CpHpH_EXP
if seqlen > 0:
CpHpH_EXP_Num = (probC * probH * probH) * seqlen
CpHpH_EXP = formatString.format(CpHpH_EXP_Num)
# CpHpH_OE
if CpHpH_EXP_Num > 0:
CpHpH_OE_Num = CpHpH_OBS_Num / CpHpH_EXP_Num
CpHpH_OE = formatString.format(CpHpH_OE_Num)
# GC content
ACGT = baseTotals['A'] + baseTotals['T'] + baseTotals['C'] + baseTotals['G']
if ACGT > 0:
gcContent = formatString.format((baseTotals['C'] + baseTotals['G']) / ACGT)
# Print the results
outFunc((record.id, # seq id
str(seqlen), # sequence length
str(gcContent), # GC content
str(nCount), # total N's in window
str(CpG_OBS),
str(CpG_EXP),
str(CpG_OE),
str(CpHpG_OBS),
str(CpHpG_EXP),
str(CpHpG_OE),
str(CpHpH_OBS),
str(CpHpH_EXP),
str(CpHpH_OE),
))
# Update counters and trackers
baseidx += 1
Sec2lastbase = lastbase
lastbase = base
# Loop to next base
# next sequence
handle.close()
## Output format writer functions ##
def writeTab(record):
'''Writes a record in Tab-delimited format'''
delimiter = "\t"
print(delimiter.join(record))
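## Worked example (added for illustration, not part of the original script):
## for a sequence 'CGCG': seqlen = 4, C = 2, G = 2, pairTotals['CG'] = 2, so
##   CpG_OBS = 2
##   CpG_EXP = (2 * 2) / 4 = 1.0
##   CpG_OE  = 2 / 1.0 = 2.0
## and GC content = (2 + 2) / 4 = 1.000.
## A typical invocation (assumed usage) would be:
##   python methFreq.py genes.fasta -d 3 > methFreq.tsv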
if __name__=='__main__':
### Argument handling
arg_parser = argparse.ArgumentParser(description='Calculate observed vs expected instances of DNA-methylation motifs in gene sequences');
arg_parser.add_argument("filename", help="A fasta file containing DNA coding sequences");
arg_parser.add_argument("-H", "--header", type=bool, default=True, help="Print header row on tab delimited output");
arg_parser.add_argument("-d", "--decimal", type=int, default=3, help="Format values to x decimal places");
args = arg_parser.parse_args();
### Variable definitions/declarations
filename = args.filename;
headerRow = args.header;
decimalPlaces = args.decimal;
## Pass variables to main script
main(filename, headerRow, decimalPlaces);
|
Adamtaranto/methFreq
|
methFreq.py
|
Python
|
mit
| 9,191
|
[
"Biopython"
] |
7e486df9823e7c083f24f69befecd53cb2dc18271cb8c0cabccf72ac5af4be66
|
"""TransformationCleaningAgent cleans up finalised transformations.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN TransformationCleaningAgent
:end-before: ##END
:dedent: 2
:caption: TransformationCleaningAgent options
"""
__RCSID__ = "$Id$"
# # imports
import re
import ast
import os.path
from datetime import datetime, timedelta
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.Proxy import executeWithUserProxy
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
# # agent's name
AGENT_NAME = 'Transformation/TransformationCleaningAgent'
class TransformationCleaningAgent(AgentModule):
"""
.. class:: TransformationCleaningAgent
:param ~DIRAC.DataManagementSystem.Client.DataManager.DataManager dm: DataManager instance
:param ~TransformationClient.TransformationClient transClient: TransformationClient instance
:param ~FileCatalogClient.FileCatalogClient metadataClient: FileCatalogClient instance
"""
def __init__(self, *args, **kwargs):
""" c'tor
"""
AgentModule.__init__(self, *args, **kwargs)
# # transformation client
self.transClient = None
# # wms client
self.wmsClient = None
# # request client
self.reqClient = None
# # file catalog client
self.metadataClient = None
# # transformations types
self.transformationTypes = None
# # directory locations
self.directoryLocations = ['TransformationDB', 'MetadataCatalog']
# # transformation metadata
self.transfidmeta = 'TransformationID'
    # # archive period in days
self.archiveAfter = 7
# # active SEs
self.activeStorages = []
# # transformation log SEs
self.logSE = 'LogSE'
# # enable/disable execution
self.enableFlag = 'True'
self.dataProcTTypes = ['MCSimulation', 'Merge']
self.dataManipTTypes = ['Replication', 'Removal']
def initialize(self):
""" agent initialisation
    reading and setting config opts
:param self: self reference
"""
# # shifter proxy
# See cleanCatalogContents method: this proxy will be used ALSO when the file catalog used
# is the DIRAC File Catalog (DFC).
    # This is possible because the "UseServerCertificate" option is unset there
self.shifterProxy = self.am_getOption('shifterProxy', None)
# # transformations types
self.dataProcTTypes = Operations().getValue('Transformations/DataProcessing', self.dataProcTTypes)
self.dataManipTTypes = Operations().getValue('Transformations/DataManipulation', self.dataManipTTypes)
agentTSTypes = self.am_getOption('TransformationTypes', [])
if agentTSTypes:
self.transformationTypes = sorted(agentTSTypes)
else:
self.transformationTypes = sorted(self.dataProcTTypes + self.dataManipTTypes)
self.log.info("Will consider the following transformation types: %s" % str(self.transformationTypes))
# # directory locations
self.directoryLocations = sorted(self.am_getOption('DirectoryLocations', self.directoryLocations))
self.log.info("Will search for directories in the following locations: %s" % str(self.directoryLocations))
# # transformation metadata
self.transfidmeta = self.am_getOption('TransfIDMeta', self.transfidmeta)
self.log.info("Will use %s as metadata tag name for TransformationID" % self.transfidmeta)
    # # archive period in days
self.archiveAfter = self.am_getOption('ArchiveAfter', self.archiveAfter) # days
self.log.info("Will archive Completed transformations after %d days" % self.archiveAfter)
# # active SEs
self.activeStorages = sorted(self.am_getOption('ActiveSEs', self.activeStorages))
if self.activeStorages:
self.log.info("Will check the following storage elements: %s" % str(self.activeStorages))
# # transformation log SEs
self.logSE = Operations().getValue('/LogStorage/LogSE', self.logSE)
self.log.info("Will remove logs found on storage element: %s" % self.logSE)
# # transformation client
self.transClient = TransformationClient()
# # wms client
self.wmsClient = WMSClient()
# # request client
self.reqClient = ReqClient()
# # file catalog client
self.metadataClient = FileCatalogClient()
return S_OK()
#############################################################################
def execute(self):
""" execution in one agent's cycle
:param self: self reference
"""
self.enableFlag = self.am_getOption('EnableFlag', self.enableFlag)
if self.enableFlag != 'True':
self.log.info('TransformationCleaningAgent is disabled by configuration option EnableFlag')
return S_OK('Disabled via CS flag')
# Obtain the transformations in Cleaning status and remove any mention of the jobs/files
res = self.transClient.getTransformations({'Status': 'Cleaning',
'Type': self.transformationTypes})
if res['OK']:
for transDict in res['Value']:
if self.shifterProxy:
self._executeClean(transDict)
else:
self.log.info("Cleaning transformation %(TransformationID)s with %(AuthorDN)s, %(AuthorGroup)s" %
transDict)
executeWithUserProxy(self._executeClean)(transDict,
proxyUserDN=transDict['AuthorDN'],
proxyUserGroup=transDict['AuthorGroup'])
else:
self.log.error("Failed to get transformations", res['Message'])
# Obtain the transformations in RemovingFiles status and removes the output files
res = self.transClient.getTransformations({'Status': 'RemovingFiles',
'Type': self.transformationTypes})
if res['OK']:
for transDict in res['Value']:
if self.shifterProxy:
self._executeRemoval(transDict)
else:
self.log.info("Removing files for transformation %(TransformationID)s with %(AuthorDN)s, %(AuthorGroup)s" %
transDict)
executeWithUserProxy(self._executeRemoval)(transDict,
proxyUserDN=transDict['AuthorDN'],
proxyUserGroup=transDict['AuthorGroup'])
else:
self.log.error("Could not get the transformations", res['Message'])
# Obtain the transformations in Completed status and archive if inactive for X days
olderThanTime = datetime.utcnow() - timedelta(days=self.archiveAfter)
res = self.transClient.getTransformations({'Status': 'Completed',
'Type': self.transformationTypes},
older=olderThanTime,
timeStamp='LastUpdate')
if res['OK']:
for transDict in res['Value']:
        if self.shifterProxy:
self._executeArchive(transDict)
else:
self.log.info("Archiving files for transformation %(TransformationID)s with %(AuthorDN)s, %(AuthorGroup)s" %
transDict)
executeWithUserProxy(self._executeArchive)(transDict,
proxyUserDN=transDict['AuthorDN'],
proxyUserGroup=transDict['AuthorGroup'])
else:
self.log.error("Could not get the transformations", res['Message'])
return S_OK()
def _executeClean(self, transDict):
"""Clean transformation."""
# if transformation is of type `Replication` or `Removal`, there is nothing to clean.
# We just archive
if transDict['Type'] in self.dataManipTTypes:
res = self.archiveTransformation(transDict['TransformationID'])
if not res['OK']:
self.log.error("Problems archiving transformation %s: %s" % (transDict['TransformationID'],
res['Message']))
else:
res = self.cleanTransformation(transDict['TransformationID'])
if not res['OK']:
self.log.error("Problems cleaning transformation %s: %s" % (transDict['TransformationID'],
res['Message']))
def _executeRemoval(self, transDict):
"""Remove files from given transformation."""
res = self.removeTransformationOutput(transDict['TransformationID'])
if not res['OK']:
self.log.error("Problems removing transformation %s: %s" % (transDict['TransformationID'],
res['Message']))
def _executeArchive(self, transDict):
"""Archive the given transformation."""
res = self.archiveTransformation(transDict['TransformationID'])
if not res['OK']:
self.log.error("Problems archiving transformation %s: %s" % (transDict['TransformationID'],
res['Message']))
return S_OK()
#############################################################################
#
# Get the transformation directories for checking
#
def getTransformationDirectories(self, transID):
""" get the directories for the supplied transformation from the transformation system.
These directories are used by removeTransformationOutput and cleanTransformation for removing output.
:param self: self reference
:param int transID: transformation ID
"""
self.log.verbose("Cleaning Transformation directories of transformation %d" % transID)
directories = []
if 'TransformationDB' in self.directoryLocations:
res = self.transClient.getTransformationParameters(transID, ['OutputDirectories'])
if not res['OK']:
self.log.error("Failed to obtain transformation directories", res['Message'])
return res
transDirectories = []
if res['Value']:
if not isinstance(res['Value'], list):
try:
transDirectories = ast.literal_eval(res['Value'])
except Exception as _:
# It can happen if the res['Value'] is '/a/b/c' instead of '["/a/b/c"]'
transDirectories.append(res['Value'])
else:
transDirectories = res['Value']
directories = self._addDirs(transID, transDirectories, directories)
if 'MetadataCatalog' in self.directoryLocations:
res = self.metadataClient.findDirectoriesByMetadata({self.transfidmeta: transID})
if not res['OK']:
self.log.error("Failed to obtain metadata catalog directories", res['Message'])
return res
transDirectories = res['Value']
directories = self._addDirs(transID, transDirectories, directories)
if not directories:
self.log.info("No output directories found")
directories = sorted(directories)
return S_OK(directories)
@classmethod
def _addDirs(cls, transID, newDirs, existingDirs):
""" append unique :newDirs: list to :existingDirs: list
:param self: self reference
:param int transID: transformationID
:param list newDirs: src list of paths
:param list existingDirs: dest list of paths
"""
for folder in newDirs:
transStr = str(transID).zfill(8)
if re.search(transStr, str(folder)):
if folder not in existingDirs:
existingDirs.append(os.path.normpath(folder))
return existingDirs
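  # Illustrative example (added): for transID = 123 the zero-padded token is
  # '00000123', so a folder such as '/vo/prod/00000123/LOG' is kept
  # (normalised), while paths without the padded ID are ignored.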
#############################################################################
#
# These are the methods for performing the cleaning of catalogs and storage
#
def cleanStorageContents(self, directory):
""" delete lfn dir from all active SE
:param self: self reference
    :param str directory: folder name
"""
if not self.activeStorages:
return S_OK()
self.log.verbose("Cleaning Storage Contents")
for storageElement in self.activeStorages:
res = self.__removeStorageDirectory(directory, storageElement)
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory(self, directory, storageElement):
""" wipe out all contents from :directory: at :storageElement:
:param self: self reference
:param str directory: path
:param str storageElement: SE name
"""
self.log.info('Removing the contents of %s at %s' % (directory, storageElement))
se = StorageElement(storageElement)
res = returnSingleResult(se.exists(directory))
if not res['OK']:
self.log.error("Failed to obtain existance of directory", res['Message'])
return res
exists = res['Value']
if not exists:
self.log.info("The directory %s does not exist at %s " % (directory, storageElement))
return S_OK()
res = returnSingleResult(se.removeDirectory(directory, recursive=True))
if not res['OK']:
self.log.error("Failed to remove storage directory", res['Message'])
return res
self.log.info("Successfully removed %d files from %s at %s" % (res['Value']['FilesRemoved'],
directory,
storageElement))
return S_OK()
def cleanCatalogContents(self, directory):
""" wipe out everything from catalog under folder :directory:
:param self: self reference
    :param str directory: folder name
"""
self.log.verbose("Cleaning Catalog contents")
res = self.__getCatalogDirectoryContents([directory])
if not res['OK']:
return res
filesFound = res['Value']
if not filesFound:
self.log.info("No files are registered in the catalog directory %s" % directory)
return S_OK()
self.log.info("Attempting to remove %d possible remnants from the catalog and storage" % len(filesFound))
# Executing with shifter proxy
gConfigurationData.setOptionInCFG('/DIRAC/Security/UseServerCertificate', 'false')
res = DataManager().removeFile(filesFound, force=True)
gConfigurationData.setOptionInCFG('/DIRAC/Security/UseServerCertificate', 'true')
if not res['OK']:
return res
realFailure = False
for lfn, reason in res['Value']['Failed'].items():
if "File does not exist" in str(reason):
self.log.warn("File %s not found in some catalog: " % (lfn))
else:
self.log.error("Failed to remove file found in the catalog", "%s %s" % (lfn, reason))
realFailure = True
if realFailure:
return S_ERROR("Failed to remove all files found in the catalog")
return S_OK()
def __getCatalogDirectoryContents(self, directories):
""" get catalog contents under paths :directories:
:param self: self reference
:param list directories: list of paths in catalog
"""
self.log.info('Obtaining the catalog contents for %d directories:' % len(directories))
for directory in directories:
self.log.info(directory)
activeDirs = directories
allFiles = {}
fc = FileCatalog()
while activeDirs:
currentDir = activeDirs[0]
res = returnSingleResult(fc.listDirectory(currentDir))
activeDirs.remove(currentDir)
if not res['OK'] and 'Directory does not exist' in res['Message']: # FIXME: DFC should return errno
self.log.info("The supplied directory %s does not exist" % currentDir)
elif not res['OK']:
if "No such file or directory" in res['Message']:
self.log.info("%s: %s" % (currentDir, res['Message']))
else:
self.log.error("Failed to get directory %s content: %s" % (currentDir, res['Message']))
else:
dirContents = res['Value']
activeDirs.extend(dirContents['SubDirs'])
allFiles.update(dirContents['Files'])
self.log.info("Found %d files" % len(allFiles))
return S_OK(allFiles.keys())
def cleanTransformationLogFiles(self, directory):
""" clean up transformation logs from directory :directory:
:param self: self reference
:param str directory: folder name
"""
self.log.verbose("Removing log files found in the directory %s" % directory)
res = returnSingleResult(StorageElement(self.logSE).removeDirectory(directory))
if not res['OK']:
self.log.error("Failed to remove log files", res['Message'])
return res
self.log.info("Successfully removed transformation log directory")
return S_OK()
#############################################################################
#
# These are the functional methods for archiving and cleaning transformations
#
def removeTransformationOutput(self, transID):
""" This just removes any mention of the output data from the catalog and storage """
self.log.info("Removing output data for transformation %s" % transID)
res = self.getTransformationDirectories(transID)
if not res['OK']:
self.log.error('Problem obtaining directories for transformation %s with result "%s"' % (transID, res))
return S_OK()
directories = res['Value']
for directory in directories:
if not re.search('/LOG/', directory):
res = self.cleanCatalogContents(directory)
if not res['OK']:
return res
res = self.cleanStorageContents(directory)
if not res['OK']:
return res
self.log.info("Removed directories in the catalog and storage for transformation")
# Clean ALL the possible remnants found in the metadata catalog
res = self.cleanMetadataCatalogFiles(transID)
if not res['OK']:
return res
self.log.info("Successfully removed output of transformation %d" % transID)
# Change the status of the transformation to RemovedFiles
res = self.transClient.setTransformationParameter(transID, 'Status', 'RemovedFiles')
if not res['OK']:
self.log.error("Failed to update status of transformation %s to RemovedFiles" % (transID), res['Message'])
return res
self.log.info("Updated status of transformation %s to RemovedFiles" % (transID))
return S_OK()
def archiveTransformation(self, transID):
""" This just removes job from the jobDB and the transformation DB
:param self: self reference
:param int transID: transformation ID
"""
self.log.info("Archiving transformation %s" % transID)
# Clean the jobs in the WMS and any failover requests found
res = self.cleanTransformationTasks(transID)
if not res['OK']:
return res
# Clean the transformation DB of the files and job information
res = self.transClient.cleanTransformation(transID)
if not res['OK']:
return res
self.log.info("Successfully archived transformation %d" % transID)
# Change the status of the transformation to archived
res = self.transClient.setTransformationParameter(transID, 'Status', 'Archived')
if not res['OK']:
self.log.error("Failed to update status of transformation %s to Archived" % (transID), res['Message'])
return res
self.log.info("Updated status of transformation %s to Archived" % (transID))
return S_OK()
def cleanTransformation(self, transID):
""" This removes what was produced by the supplied transformation,
leaving only some info and log in the transformation DB.
"""
self.log.info("Cleaning transformation %s" % transID)
res = self.getTransformationDirectories(transID)
if not res['OK']:
self.log.error('Problem obtaining directories for transformation %s with result "%s"' % (transID, res))
return S_OK()
directories = res['Value']
# Clean the jobs in the WMS and any failover requests found
res = self.cleanTransformationTasks(transID)
if not res['OK']:
return res
# Clean the log files for the jobs
for directory in directories:
if re.search('/LOG/', directory):
res = self.cleanTransformationLogFiles(directory)
if not res['OK']:
return res
res = self.cleanCatalogContents(directory)
if not res['OK']:
return res
res = self.cleanStorageContents(directory)
if not res['OK']:
return res
# Clean ALL the possible remnants found in the BK
res = self.cleanMetadataCatalogFiles(transID)
if not res['OK']:
return res
# Clean the transformation DB of the files and job information
res = self.transClient.cleanTransformation(transID)
if not res['OK']:
return res
self.log.info("Successfully cleaned transformation %d" % transID)
res = self.transClient.setTransformationParameter(transID, 'Status', 'Cleaned')
if not res['OK']:
self.log.error("Failed to update status of transformation %s to Cleaned" % (transID), res['Message'])
return res
self.log.info("Updated status of transformation %s to Cleaned" % (transID))
return S_OK()
def cleanMetadataCatalogFiles(self, transID):
""" wipe out files from catalog """
res = self.metadataClient.findFilesByMetadata({self.transfidmeta: transID})
if not res['OK']:
return res
fileToRemove = res['Value']
if not fileToRemove:
self.log.info('No files found for transID %s' % transID)
return S_OK()
# Executing with shifter proxy
gConfigurationData.setOptionInCFG('/DIRAC/Security/UseServerCertificate', 'false')
res = DataManager().removeFile(fileToRemove, force=True)
gConfigurationData.setOptionInCFG('/DIRAC/Security/UseServerCertificate', 'true')
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].items():
self.log.error("Failed to remove file found in metadata catalog", "%s %s" % (lfn, reason))
if res['Value']['Failed']:
return S_ERROR("Failed to remove all files found in the metadata catalog")
self.log.info("Successfully removed all files found in the BK")
return S_OK()
#############################################################################
#
# These are the methods for removing the jobs from the WMS and transformation DB
#
def cleanTransformationTasks(self, transID):
""" clean tasks from WMS, or from the RMS if it is a DataManipulation transformation
"""
self.log.verbose("Cleaning Transformation tasks of transformation %d" % transID)
res = self.__getTransformationExternalIDs(transID)
if not res['OK']:
return res
externalIDs = res['Value']
if externalIDs:
res = self.transClient.getTransformationParameters(transID, ['Type'])
if not res['OK']:
self.log.error("Failed to determine transformation type")
return res
transType = res['Value']
if transType in self.dataProcTTypes:
res = self.__removeWMSTasks(externalIDs)
else:
res = self.__removeRequests(externalIDs)
if not res['OK']:
return res
return S_OK()
def __getTransformationExternalIDs(self, transID):
""" collect all ExternalIDs for transformation :transID:
:param self: self reference
    :param int transID: transformation ID
"""
res = self.transClient.getTransformationTasks(condDict={'TransformationID': transID})
if not res['OK']:
self.log.error("Failed to get externalIDs for transformation %d" % transID, res['Message'])
return res
externalIDs = [taskDict['ExternalID'] for taskDict in res["Value"]]
self.log.info("Found %d tasks for transformation" % len(externalIDs))
return S_OK(externalIDs)
def __removeRequests(self, requestIDs):
""" This will remove requests from the RMS system -
"""
rIDs = [int(long(j)) for j in requestIDs if long(j)]
for reqID in rIDs:
self.reqClient.deleteRequest(reqID)
return S_OK()
def __removeWMSTasks(self, transJobIDs):
""" wipe out jobs and their requests from the system
TODO: should check request status, maybe FTS files as well ???
:param self: self reference
    :param list transJobIDs: job IDs
"""
# Prevent 0 job IDs
jobIDs = [int(j) for j in transJobIDs if int(j)]
allRemove = True
for jobList in breakListIntoChunks(jobIDs, 500):
res = self.wmsClient.killJob(jobList)
if res['OK']:
self.log.info("Successfully killed %d jobs from WMS" % len(jobList))
elif ("InvalidJobIDs" in res) and ("NonauthorizedJobIDs" not in res) and ("FailedJobIDs" not in res):
self.log.info("Found %s jobs which did not exist in the WMS" % len(res['InvalidJobIDs']))
elif "NonauthorizedJobIDs" in res:
self.log.error("Failed to kill %s jobs because not authorized" % len(res['NonauthorizedJobIDs']))
allRemove = False
elif "FailedJobIDs" in res:
self.log.error("Failed to kill %s jobs" % len(res['FailedJobIDs']))
allRemove = False
res = self.wmsClient.deleteJob(jobList)
if res['OK']:
self.log.info("Successfully removed %d jobs from WMS" % len(jobList))
elif ("InvalidJobIDs" in res) and ("NonauthorizedJobIDs" not in res) and ("FailedJobIDs" not in res):
self.log.info("Found %s jobs which did not exist in the WMS" % len(res['InvalidJobIDs']))
elif "NonauthorizedJobIDs" in res:
self.log.error("Failed to remove %s jobs because not authorized" % len(res['NonauthorizedJobIDs']))
allRemove = False
elif "FailedJobIDs" in res:
self.log.error("Failed to remove %s jobs" % len(res['FailedJobIDs']))
allRemove = False
if not allRemove:
return S_ERROR("Failed to remove all remnants from WMS")
self.log.info("Successfully removed all tasks from the WMS")
if not jobIDs:
self.log.info("JobIDs not present, unable to remove asociated requests.")
return S_OK()
failed = 0
failoverRequests = {}
res = self.reqClient.getRequestIDsForJobs(jobIDs)
if not res['OK']:
self.log.error("Failed to get requestID for jobs.", res['Message'])
return res
failoverRequests.update(res['Value']['Successful'])
if not failoverRequests:
return S_OK()
for jobID, requestID in res['Value']['Successful'].items():
# Put this check just in case, tasks must have associated jobs
if jobID == 0 or jobID == '0':
continue
res = self.reqClient.deleteRequest(requestID)
if not res['OK']:
self.log.error("Failed to remove request from RequestDB", res['Message'])
failed += 1
else:
self.log.verbose("Removed request %s associated to job %d." % (requestID, jobID))
if failed:
self.log.info("Successfully removed %s requests" % (len(failoverRequests) - failed))
self.log.info("Failed to remove %s requests" % failed)
return S_ERROR("Failed to remove all the request from RequestDB")
self.log.info("Successfully removed all the associated failover requests")
return S_OK()
|
arrabito/DIRAC
|
TransformationSystem/Agent/TransformationCleaningAgent.py
|
Python
|
gpl-3.0
| 27,459
|
[
"DIRAC"
] |
6f34db20ef2f361bc06388c8a3f51e58f3fae6c2e9958191dde94ff91e5e22c2
|
#!/usr/bin/env python
import argparse
import logging
import sys
from BCBio import GFF
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqFeature import (
FeatureLocation,
SeqFeature
)
from Bio.SeqRecord import SeqRecord
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def parse_xmfa(xmfa):
"""Simple XMFA parser until https://github.com/biopython/biopython/pull/544
"""
current_lcb = []
current_seq = {}
for line in xmfa.readlines():
if line.startswith('#'):
continue
if line.strip() == '=':
if 'id' in current_seq:
current_lcb.append(current_seq)
current_seq = {}
yield current_lcb
current_lcb = []
else:
line = line.strip()
if line.startswith('>'):
if 'id' in current_seq:
current_lcb.append(current_seq)
current_seq = {}
data = line.strip().split()
id, loc = data[1].split(':')
start, end = loc.split('-')
current_seq = {
'rid': '_'.join(data[1:]),
'id': id,
'start': int(start),
'end': int(end),
'strand': 1 if data[2] == '+' else -1,
'seq': ''
}
else:
current_seq['seq'] += line.strip()
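# Sketch of the XMFA layout the parser above expects (illustrative):
#   > 1:1-120 + chr1    <- '> seqid:start-end strand ...' header
#   ACGTACGT...         <- aligned sequence lines (may contain '-')
#   > 2:1-118 - chr2
#   ACG-ACGT...
#   =                   <- a line holding only '=' ends one LCB (block)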
def _percent_identity(a, b):
"""Calculate % identity, ignoring gaps in the host sequence
"""
match = 0
mismatch = 0
for char_a, char_b in zip(list(a), list(b)):
if char_a == '-':
continue
if char_a == char_b:
match += 1
else:
mismatch += 1
if match + mismatch == 0:
return 0
return 100 * float(match) / (match + mismatch)
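# Worked examples (added): _percent_identity('ACGT', 'ACGA') == 75.0, and
# gaps in the first (host) sequence are skipped, so
# _percent_identity('A-CG', 'ATCG') == 100.0.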
def _id_tn_dict(sequences):
"""Figure out sequence IDs
"""
label_convert = {}
if sequences is not None:
if len(sequences) == 1:
for i, record in enumerate(SeqIO.parse(sequences[0], 'fasta')):
label_convert[str(i + 1)] = record.id
else:
for i, sequence in enumerate(sequences):
for record in SeqIO.parse(sequence, 'fasta'):
label_convert[str(i + 1)] = record.id
continue
return label_convert
def convert_xmfa_to_gff3(xmfa_file, relative_to='1', sequences=None, window_size=1000):
label_convert = _id_tn_dict(sequences)
lcbs = parse_xmfa(xmfa_file)
records = [SeqRecord(Seq("A"), id=label_convert.get(relative_to, relative_to))]
for lcb in lcbs:
ids = [seq['id'] for seq in lcb]
        # Block doesn't involve the reference ("relative_to") sequence
if relative_to not in ids:
continue
# Skip sequences that are JUST our "relative_to" genome
if len(ids) == 1:
continue
parent = [seq for seq in lcb if seq['id'] == relative_to][0]
others = [seq for seq in lcb if seq['id'] != relative_to]
for other in others:
other['feature'] = SeqFeature(
FeatureLocation(parent['start'], parent['end'] + 1),
type="match", strand=parent['strand'],
qualifiers={
"source": "progressiveMauve",
"target": label_convert.get(other['id'], other['id']),
"ID": label_convert.get(other['id'], 'xmfa_' + other['rid'])
}
)
for i in range(0, len(lcb[0]['seq']), window_size):
block_seq = parent['seq'][i:i + window_size]
real_window_size = len(block_seq)
real_start = abs(parent['start']) - parent['seq'][0:i].count('-') + i
real_end = real_start + real_window_size - block_seq.count('-')
if (real_end - real_start) < 10:
continue
if parent['start'] < 0:
strand = -1
else:
strand = 1
for other in others:
pid = _percent_identity(block_seq, other['seq'][i:i + real_window_size])
# Ignore 0% identity sequences
if pid == 0:
continue
# Support for Biopython 1.68 and above, which removed sub_features
if not hasattr(other['feature'], "sub_features"):
other['feature'].sub_features = []
other['feature'].sub_features.append(
SeqFeature(
FeatureLocation(real_start, real_end),
type="match_part", strand=strand,
qualifiers={
"source": "progressiveMauve",
'score': pid
}
)
)
for other in others:
records[0].features.append(other['feature'])
return records
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert XMFA alignments to gff3', prog='xmfa2gff3')
parser.add_argument('xmfa_file', type=argparse.FileType('r'), help='XMFA File')
parser.add_argument('--window_size', type=int, help='Window size for analysis', default=1000)
parser.add_argument('--relative_to', type=str, help='Index of the parent sequence in the MSA', default='1')
parser.add_argument('--sequences', type=argparse.FileType('r'), nargs='+',
help='Fasta files (in same order) passed to parent for reconstructing proper IDs')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
result = convert_xmfa_to_gff3(**vars(args))
GFF.write(result, sys.stdout)
|
Delphine-L/tools-iuc
|
tools/progressivemauve/xmfa2gff3.py
|
Python
|
mit
| 5,800
|
[
"Biopython"
] |
33ca01db92a1d942f1b9a488b55505cb53fec4ce849fd1e17fc16e2ad17e5b31
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division # For type safety in gaussian_kl_divergence
from functools import partial
from math import erfc
import numpy as np
from numpy.random import RandomState
import kl
import threshold
def gaussian_kl_divergence(mu1, s1, mu2, s2):
"Return KL(N(mu1,s1)||N(mu2,s2))"
# http://stats.stackexchange.com/a/7443/40686
return np.log(s2 / s1) + ((s1**2 + (mu1 - mu2)**2) / (2 * s2**2)) - 0.5
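def _demo_gaussian_kl():
    # Sanity-check sketch (assumed, not part of this test suite): the
    # divergence of a Gaussian from itself is zero, and KL(N(0,1)||N(0,2))
    # has the closed form log(2) + 1/8 - 1/2.
    assert gaussian_kl_divergence(0, 1, 0, 1) == 0
    expected = np.log(2) + 1.0 / 8 - 0.5
    assert abs(gaussian_kl_divergence(0, 1, 0, 2) - expected) < 1e-12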
def gaussian_log_pdf(mu, s):
def lpdf(x):
normalizing_constant = -(np.log(2 * np.pi) / 2) - np.log(s)
return normalizing_constant - ((x - mu)**2 / (2 * s**2))
return lpdf
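def _demo_gaussian_log_pdf():
    # Sketch (assumed): the standard-normal log-density at zero equals the
    # normalizing constant, -log(2*pi)/2.
    lpdf = gaussian_log_pdf(0, 1)
    assert abs(lpdf(0) - (-np.log(2 * np.pi) / 2)) < 1e-12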
def compute_kullback_leibler_check_statistic(n=100, prngstate=None):
"""Compute the lowest of the survival function and the CDF of the exact KL
divergence KL(N(mu1,s1)||N(mu2,s2)) w.r.t. the sample distribution of the
KL divergence drawn by computing log(P(x|N(mu1,s1)))-log(P(x|N(mu2,s2)))
over a sample x~N(mu1,s1). If we are computing the KL divergence
accurately, the exact value should fall squarely in the sample, and the
tail probabilities should be relatively large.
"""
if prngstate is None:
raise TypeError('Must explicitly specify numpy.random.RandomState')
mu1 = mu2 = 0
s1 = 1
s2 = 2
exact = gaussian_kl_divergence(mu1, s1, mu2, s2)
sample = prngstate.normal(mu1, s1, n)
lpdf1 = gaussian_log_pdf(mu1, s1)
lpdf2 = gaussian_log_pdf(mu2, s2)
estimate, std = kl.kullback_leibler(sample, lpdf1, lpdf2)
# This computes the minimum of the left and right tail probabilities of the
# exact KL divergence vs a gaussian fit to the sample estimate. There is a
# distinct negative skew to the samples used to compute `estimate`, so this
# statistic is not uniform. Nonetheless, we do not expect it to get too
# small.
return erfc(abs(exact - estimate) / std) / 2
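def _demo_check_statistic():
    # Usage sketch (assumed): the check statistic is a tail probability, so
    # it lies in (0, 0.5] and should not be astronomically small when
    # kl.kullback_leibler is computing accurate estimates.
    stat = compute_kullback_leibler_check_statistic(
        n=100, prngstate=RandomState(0))
    assert 0 < stat <= 0.5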
def kl_test_stat():
prngstate = RandomState(17)
return partial(
compute_kullback_leibler_check_statistic, prngstate=prngstate)
def compute_kl_threshold():
"""Compute the values used in test_kullback_leibler
>>> threshold.compute_sufficiently_stringent_threshold(
kl_test_stat(), 6, 1e-20)
...
TestThreshold(
threshold=4.3883148424367044e-13,
failprob=9.724132259513859e-21,
sample_size=252135
)
This means that after generating 252135 check statistics, it was found that
the least value of six samples will be less than 4.3883148424367044e-13
with probability less than 9.724132259513859e-21 (< 1e-20).
"""
return threshold.compute_sufficiently_stringent_threshold(
kl_test_stat(), 6, 1e-20)
def test_kullback_leibler():
"""Check kullback_leibler_check_statistic doesn't give absurdly low
values."""
# See compute_kl_threshold for derivation
kl_threshold = threshold.TestThreshold(
threshold=4.3883148424367044e-13,
failprob=9.724132259513859e-21,
sample_size=252135
)
threshold.check_generator(kl_test_stat(), 6, kl_threshold.threshold, 1e-20)
|
probcomp/bayeslite
|
tests/test_kl.py
|
Python
|
apache-2.0
| 3,744
|
[
"Gaussian"
] |
227df0b06e77de219806d9583a717dda6447756e902e1c7a90bb0db49ab8ce12
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Demonstrates the construction of a rigid object by means of the
``VIRTUAL_SITES_RELATIVE`` feature.
"""
import enum
import math
import numpy as np
import espressomd
required_features = ["VIRTUAL_SITES_RELATIVE", "MASS", "ROTATIONAL_INERTIA"]
espressomd.assert_features(required_features)
import espressomd.virtual_sites
import espressomd.rotation
system = espressomd.System(box_l=[10.0] * 3)
system.virtual_sites = espressomd.virtual_sites.VirtualSitesRelative()
system.time_step = 0.01
system.thermostat.set_langevin(kT=1.0, gamma=20.0, seed=42)
class ParticleTypes(enum.IntEnum):
CENTER = enum.auto()
BRANCH = enum.auto()
branch_len = 5
center = 0.5 * system.box_l
# Place six branches, pointing +/-x +/-y and +/-z.
# Note that we do not make the particles virtual at this point.
# The script uses center-of-mass and moment-of-inertia analysis routines
# to obtain the position and inertia moments of the central particle.
# Once a particle is made virtual, it will no longer contribute to
# observables involving mass. Virtual sites are not integrated via
# Newton's equation of motion and therefore do not have a meaningful mass.
for direction in np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]):
for n in range(branch_len):
system.part.add(pos=center + (n + 1) * direction,
type=ParticleTypes.BRANCH.value)
system.part.add(pos=center - (n + 1) * direction,
type=ParticleTypes.BRANCH.value)
center_of_mass = system.analysis.center_of_mass(
p_type=ParticleTypes.BRANCH.value)
print("Center of mass:", center_of_mass)
# if using multiple nodes, we need to change min_global_cut to the largest
# separation
max_inter = np.max(np.linalg.norm(system.part[:].pos - center_of_mass, axis=1))
system.min_global_cut = max_inter
principal_moments, principal_axes = espressomd.rotation.diagonalized_inertia_tensor(
system.part[:].pos, system.part[:].mass)
# in this simple case, the cluster has principal axes aligned with the box
print("Principal moments: {}, principal axes tensor: {}".format(
principal_moments, principal_axes))
# if we rotate the arms, we have to make sure that we set the quaternion of the
# center particle accordingly while setting the principal moments of inertia
AXIS = np.array([1., 0., 0.])
ANGLE = np.pi / 4.0
def rotate_vector(vector, axis, angle):
return axis * np.dot(axis, vector) + math.cos(angle) * np.cross(
np.cross(axis, vector), axis) + math.sin(angle) * np.cross(axis, vector)
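def _demo_rotate_vector():
    # Sanity-check sketch (not part of the original sample): Rodrigues'
    # rotation of the x unit vector by pi/2 about the z axis yields the
    # y unit vector.
    rotated = rotate_vector(np.array([1., 0., 0.]),
                            np.array([0., 0., 1.]), np.pi / 2)
    assert np.allclose(rotated, [0., 1., 0.])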
for p in system.part:
p.pos = rotate_vector(p.pos - center_of_mass, AXIS, ANGLE) + center_of_mass
principal_moments, principal_axes = espressomd.rotation.diagonalized_inertia_tensor(
system.part[:].pos, system.part[:].mass)
# after rotating the whole object the principal axes changed
print("After rotating: principal moments: {}, principal axes tensor: {}".format(
principal_moments, principal_axes))
# place center bead
p_center = system.part.add(
pos=center_of_mass, mass=branch_len * 6 + 1, rinertia=principal_moments,
rotation=[1, 1, 1], type=ParticleTypes.CENTER.value, quat=espressomd.rotation.matrix_to_quat(principal_axes))
# Relate the particles that make up the rigid body to the central particle.
# This will also mark them as `virtual = True`
for p in system.part.select(type=ParticleTypes.BRANCH.value):
p.vs_auto_relate_to(p_center.id)
for frame in range(200):
system.integrator.run(100)
print("Simulation finished")
|
KaiSzuttor/espresso
|
samples/rigid_body.py
|
Python
|
gpl-3.0
| 4,204
|
[
"ESPResSo"
] |
628c65fa5ca8c90c8b999f00941a69225d2eef6970e082b15c45fd781cb8d8f2
|
from DIRAC.Core.Base.Script import Script
from DIRAC.Core.Utilities.Decorators import deprecated
# TODO: remove it in 8.1
@deprecated("DIRACScript is deprecated, use 'from DIRAC.Core.Base.Script import Script' instead.")
class DIRACScript(Script):
pass
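# Migration sketch (illustrative caller code, not part of this module):
# replace
#     from DIRAC.Core.Utilities.DIRACScript import DIRACScript
# with
#     from DIRAC.Core.Base.Script import Script
# and use `Script` wherever `DIRACScript` was used.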
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/DIRACScript.py
|
Python
|
gpl-3.0
| 258
|
[
"DIRAC"
] |
79efe7c09a0772ba59266c486ed8210deea1db04142e8023e7871be1b6f26cdd
|
usage = """ compute pixels that should be colored for contours """
import healpy as hp
import numpy as np
#=================================================
def contour_pix(map, vals, all_neighbours=True):
"""
    given a healpix map (map) and a set of values, we find and return lists of pixels that constitute the border of those sets
"""
npix = len(map)
nside = hp.npix2nside(npix)
    ### separate into pixel sets based on the supplied values
pix = np.arange(npix)
boarders = []
for val in vals:
_pix = pix[map>=val] ### pull out included pixels
truth = np.zeros((npix,), bool)
truth[_pix] = True
        boarder = np.zeros((npix,),bool) ### marks which pixels lie on the border
for ipix in _pix:
if all_neighbours:
boarder[ipix] = not truth[[n for n in hp.get_all_neighbours(nside, ipix) if n != -1]].all()
else:
boarder[ipix] = not truth[[n for n in hp.get_neighbours(nside, ipix)[0] if n != -1]].all()
boarders.append( pix[boarder] )
return boarders
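def _demo_contour_pix():
    ### usage sketch (hypothetical map, not part of the original module):
    ### mark the border pixels of the region where the map exceeds 0.5
    nside = 4
    m = np.zeros(hp.nside2npix(nside))
    m[:10] = 1.0
    return contour_pix(m, [0.5])[0] ### pixel indices along the set's edge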
###
def projplot_contour_pix(map, vals, ax, color="w", markersize=0.001, marker=".", linestyle="none", linewidth=1, alpha=0.25, verbose=False):
"""
    computes the contour pixels and colors them with projplot
"""
npix = len(map)
nside = hp.npix2nside(npix)
if verbose:
print "finding boarder pixels"
i=0
for boarder in contour_pix(map, vals):
if verbose:
print "%d / %d : %f"%(i+1, len(vals), vals[i])
i+=1
for pos in boarder_to_lines(boarder, nside, verbose=verbose):
brdr = ax.projplot(pos, color=color, alpha=alpha, linestyle=linestyle, marker=marker, linewidth=linewidth)[0]
brdr.set_markersize(markersize)
###
def boarder_to_lines(boarder, nside, verbose=False):
"""
    takes a list of pixels (boarder) and converts it to a list of positions representing a line tracing the border
"""
#==============================================================================================
### dot every point along the ring
# theta, phi = hp.pix2ang(nside, boarder)
# pos = [list(theta), list(phi)]
# return [pos]
#==============================================================================================
### walk around the rings
npix = hp.nside2npix(nside)
pix = np.arange(npix)
boarder_truth = np.zeros((npix,),bool) ### original boarder
boarder_truth[boarder] = True
truth = np.zeros((npix,),bool) ### we change this to denote which pixels have not been visited
truth[boarder] = True
visit = np.zeros((npix,),bool) ### pixels we have visited
ipix = pix[truth][0] ### pull out the first pixel
truth[ipix] = False ### turn that pixel off
visit[ipix] = True
line = [ipix] ### start of this line
lines = []
### iterate over boarder
while truth.any():
if verbose:
print "%d remaining pixels"%np.sum(truth)
print "ipix : %d"%ipix
for n in hp.get_all_neighbours(nside, ipix):
if n == -1: ### neighbour doesn't exist
pass
elif boarder_truth[n]: ### neighbour is in boarder
if verbose:
print "\t%d in boarder"%n
if truth[n]: ### pixel has not been visited
if verbose:
print "\t\thas not been visited"
line.append( n )
ipix = n
truth[n] = False
visit[n] = True
break
else: ### pixel has been visited. End line and start another
if verbose:
print "\t\thas been visited"
line.append( n )
lines.append( line )
truth[n] = False
visit[n] = True
### find a new starting point!
for _ipix in pix[visit]: ### all pixels we've visited
for _n in hp.get_all_neighbours(nside, _ipix):
if truth[_n]: ### neighbours a pixel we haven't seen
if verbose:
print "\t\t\tnew spur at %d"%_n
line = [_ipix, _n]
truth[_n] = False
visit[_n] = True
ipix = _n
break
else: ### didn't find any new spurs starting at _ipix, continue
continue
break ### we did find a new spur!
else: ### didn't find any new spurs from any pixel we have visited
if verbose:
print "\t\t\tno new spur found"
if truth.any(): ### there are still pixels to be found
ipix = pix[truth][0]
if verbose:
print "\t\t\tnew ring at %d"%ipix
truth[ipix] = False
visit[ipix] = True
line = [ipix]
break
else: ### neighbour is not in boarder
if verbose:
print "\t%d not in boarder"%n
else: ### no neighbours are in boarder_truth. How did we get to this pixel?
raise StandardError, "no neighbours aroudn %d found in boarder? How did we get to this pixel?"%ipix
### just end the line and start another
### check that we've visited all pixels
if (visit != boarder_truth).any():
raise StandardError, "visit != boarder_truth. Somehow we missed pixels?"
### close the remaining line
for n in hp.get_all_neighbours(nside, ipix):
if n == -1:
pass
elif boarder_truth[n]:
line.append( n ) ### close the line
break
else:
raise StandardError, "hanging contour line ending at %d. How did we get to this pixel?"%ipix
lines.append( line )
### transform pixel numbers into coords for plotting
pos = []
for line in lines:
theta, phi = hp.pix2ang(nside, line)
pos.append( (list(theta), list(phi)) )
### remove spurious lines?
### these may be caused by cutting a corner and then coming back and going the other way around.
    ### a characteristic would be that there are 3 points in the line, and all 3 points are neighbours of one another
return pos
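### Example (hypothetical names, assumes matplotlib): overlay the traced
### contours on a mollweide projection of a sky map
###
###     import matplotlib.pyplot as plt
###     hp.mollview(posterior_map)
###     projplot_contour_pix(posterior_map, [threshold_90], plt.gca())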
|
reedessick/bayesburst
|
contours.py
|
Python
|
gpl-2.0
| 5,435
|
[
"VisIt"
] |
aaecca98b35654d828f2e4d65239cd285bdb8c9de2e7b4604141824e7603388e
|
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, rand, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy._lib._version import NumpyVersion
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
dists = ['uniform','norm','lognorm','expon','beta',
'powerlaw','bradford','burr','fisk','cauchy','halfcauchy',
'foldcauchy','gamma','gengamma','loggamma',
'alpha','anglit','arcsine','betaprime','dgamma',
'exponnorm', 'exponweib','exponpow','frechet_l','frechet_r',
'gilbrat','f','ncf','chi2','chi','nakagami','genpareto',
'genextreme','genhalflogistic','pareto','lomax','halfnorm',
'halflogistic','fatiguelife','foldnorm','ncx2','t','nct',
'weibull_min','weibull_max','dweibull','maxwell','rayleigh',
'genlogistic', 'logistic','gumbel_l','gumbel_r','gompertz',
'hypsecant', 'laplace', 'reciprocal','triang','tukeylambda',
'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm',
'rice']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
# check function for test generator
def check_distribution(dist, args, alpha):
D,pval = stats.kstest(dist,'', args=args, N=1000)
if (pval < alpha):
D,pval = stats.kstest(dist,'',args=args, N=1000)
# if (pval < alpha):
# D,pval = stats.kstest(dist,'',args=args, N=1000)
assert_(pval > alpha, msg="D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
# nose test generator
def test_all_distributions():
for dist in dists:
distfunc = getattr(stats, dist)
nargs = distfunc.numargs
alpha = 0.01
if dist == 'fatiguelife':
alpha = 0.001
if dist == 'frechet':
args = tuple(2*rand(1))+(0,)+tuple(2*rand(2))
elif dist == 'triang':
args = tuple(rand(nargs))
elif dist == 'reciprocal':
vals = rand(nargs)
vals[1] = vals[0] + 1.0
args = tuple(vals)
elif dist == 'vonmises':
yield check_distribution, dist, (10,), alpha
yield check_distribution, dist, (101,), alpha
args = tuple(1.0+rand(nargs))
else:
args = tuple(1.0+rand(nargs))
yield check_distribution, dist, args, alpha
def check_vonmises_pdf_periodic(k,l,s,x):
vm = stats.vonmises(k,loc=l,scale=s)
assert_almost_equal(vm.pdf(x),vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k,l,s,x):
vm = stats.vonmises(k,loc=l,scale=s)
assert_almost_equal(vm.cdf(x) % 1,vm.cdf(x % (2*numpy.pi*s)) % 1)
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0,1,numpy.pi,10,100]:
yield check_vonmises_pdf_periodic, k, 0, 1, x
yield check_vonmises_pdf_periodic, k, 1, 1, x
yield check_vonmises_pdf_periodic, k, 0, 10, x
yield check_vonmises_cdf_periodic, k, 0, 1, x
yield check_vonmises_cdf_periodic, k, 1, 1, x
yield check_vonmises_cdf_periodic, k, 0, 10, x
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
class TestRandInt(TestCase):
def test_rvs(self):
vals = stats.randint.rvs(5,30,size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5,30,size=(2,50))
assert_(numpy.shape(vals) == (2,50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15,46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15,46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k,5,30)
assert_array_almost_equal(vals,out)
def test_cdf(self):
x = numpy.r_[0:36:100j]
k = numpy.floor(x)
out = numpy.select([k >= 30,k >= 5],[1.0,(k-5.0+1)/(30-5.0)],0)
vals = stats.randint.cdf(x,5,30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(TestCase):
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100,1)
vals2 = stats.binom.pmf(0, 100,0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
class TestNBinom(TestCase):
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0,1,1)
assert_equal(val,0)
class TestGeom(TestCase):
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
vals = stats.geom.pmf([1,2,3],0.5)
assert_array_almost_equal(vals,[0.5,0.25,0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1,2,3], 0.5))
vals2 = stats.geom.logpmf([1,2,3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
class TestGennorm(TestCase):
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm(TestCase):
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(TestCase):
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
# Check a case that fails because of extreme tailness.
        raise SkipTest('truncnorm rvs is known to fail at extreme tails')
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = []
for eaten in fruits_eaten:
res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten))
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2239.771  # From R
        assert_almost_equal(result, expected, decimal=3)
class TestLoggamma(TestCase):
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
            computed = stats.loggamma.stats(c, moments='mvsk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogser(TestCase):
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
class TestPareto(TestCase):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
class TestGenpareto(TestCase):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
assert_equal(stats.genpareto.a, 0.)
assert_(np.isposinf(stats.genpareto.b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
            for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
class TestPearson3(TestCase):
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3,-2,-1,0,1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3,-2,-1,0,1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestPoisson(TestCase):
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
class TestZipf(TestCase):
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace(TestCase):
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0,0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
a = np.log(2.)
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0.,0.))
assert_allclose((v, k), (4., 3.25))
class TestInvGamma(TestCase):
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"assert_* funcs broken with inf/nan")
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
assert_allclose(mvsk,
[0.05461496450, 0.0001723162534, 1.020362676, 2.055616582])
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
expected = ([10., 0.476190476, 0.2173913043], # mmm
[np.inf, 0.2061430632, 0.01312749422], # vvv
[np.nan, 41.95235392, 2.919025532], # sss
[np.nan, np.nan, 24.51923076]) # kkk
for x, y in zip(mvsk, expected):
assert_almost_equal(x, y)
class TestF(TestCase):
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
assert_(np.isfinite(m))
assert_(np.isfinite(v))
assert_(np.isfinite(s))
assert_(not np.isfinite(k))
def test_moments_warnings(self):
# no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
@dec.knownfailureif(True, 'f stats does not properly broadcast')
def test_stats_broadcast(self):
# stats do not fully broadcast just yet
mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
# Regression test for #1191
assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
class TestRvDiscrete(TestCase):
def test_rvs(self):
states = [-1,0,1,2,3,4]
probability = [0.0,0.3,0.4,0.0,0.3,0.0]
samples = 1000
r = stats.rv_discrete(name='sample',values=(states,probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s,p in zip(states,probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
class TestExpon(TestCase):
def test_zero(self):
assert_equal(stats.expon.pdf(0),1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
class TestExponNorm(TestCase):
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
# See wikipedia for these formulae
# where it is listed as an exponentially modified gaussian
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
class TestGenExpon(TestCase):
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
assert_almost_equal(simps(stats.genexpon.pdf(numpy.arange(0,10,0.01),
0.5, 0.5, 2.0),
dx=0.01), 1, 1)
def test_cdf_bounds(self):
# CDF should always be positive
cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(TestCase):
def test_tail(self):
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5)
class TestSkellam(TestCase):
def test_pmf(self):
# comparison to R
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = numpy.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
def test_cdf(self):
# comparison to R, only 5 decimals
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = numpy.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(TestCase):
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
with np.errstate(divide='ignore'):
pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
class TestBeta(TestCase):
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0,1,0.5)
assert_almost_equal(logpdf, -0.69314718056)
logpdf = stats.beta.logpdf(0,0.5,1)
assert_almost_equal(logpdf, np.inf)
def test_logpdf_ticket_1866(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.beta(alpha, beta)
assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
class TestBetaPrime(TestCase):
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.betaprime(alpha, beta)
assert_(np.isfinite(b.logpdf(x)).all())
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_cdf(self):
# regression test for gh-4030: Implementation of
# scipy.stats.betaprime.cdf()
x = stats.betaprime.cdf(0, 0.2, 0.3)
assert_equal(x, 0.0)
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
cdfs = stats.betaprime.cdf(x, alpha, beta)
assert_(np.isfinite(cdfs).all())
# check the new cdf implementation vs generic one:
gen_cdf = stats.rv_continuous._cdf_single
cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(TestCase):
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
def test_logpdf(self):
# Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
# situation
logpdf = stats.gamma.logpdf(0,1)
assert_almost_equal(logpdf, 0)
class TestChi2(TestCase):
# regression tests after precision improvements, ticket:1041, not verified
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, 14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778, 14)
class TestArrayArgument(TestCase): # test for ticket:992
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5), size=(10,5))
assert_equal(rvs.shape, (10,5))
class TestDocstring(TestCase):
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower())
if stats.bernoulli.__doc__ is not None:
self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower())
def test_no_name_arg(self):
# If name is not given, construction shouldn't fail. See #1508.
stats.rv_continuous()
stats.rv_discrete()
class TestEntropy(TestCase):
def test_entropy_positive(self):
# See ticket #497
pk = [0.5,0.2,0.3]
qk = [0.1,0.25,0.65]
eself = stats.entropy(pk,pk)
edouble = stats.entropy(pk,qk)
assert_(0.0 == eself)
assert_(edouble >= 0.0)
def test_entropy_base(self):
pk = np.ones(16, float)
S = stats.entropy(pk, base=2.)
assert_(abs(S - 4.) < 1.e-5)
qk = np.ones(16, float)
qk[:8] = 2.
S = stats.entropy(pk, qk)
S2 = stats.entropy(pk, qk, base=2.)
assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
def test_entropy_zero(self):
# Test for PR-479
assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
decimal=12)
def test_entropy_2d(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"assert_* funcs broken with inf/nan")
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[np.inf, 0.18609809])
pk[0][0] = 0.0
assert_array_almost_equal(stats.entropy(pk, qk),
[0.17403988, 0.18609809])
def TestArgsreduce():
a = array([1,3,2,1,2,3,3])
b,c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3,2,2,3,3])
assert_array_equal(c, [2,2,2,2,2])
b,c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b,c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
skip = ['ncf']
@dec.slow
def test_fit(self):
def check(func, dist, args, alpha):
if dist in self.skip:
raise SkipTest("%s fit known to fail" % dist)
distfunc = getattr(stats, dist)
with np.errstate(all='ignore'):
res = distfunc.rvs(*args, **{'size':200})
vals = distfunc.fit(res)
vals2 = distfunc.fit(res, optimizer='powell')
# Only check the length of the return
# FIXME: should check the actual results to see if we are 'close'
# to what was created --- but what is 'close' enough
if dist == 'frechet':
assert_(len(vals) == len(args))
assert_(len(vals2) == len(args))
else:
assert_(len(vals) == 2+len(args))
assert_(len(vals2) == 2+len(args))
for func, dist, args, alpha in test_all_distributions():
yield check, func, dist, args, alpha
@dec.slow
def test_fix_fit(self):
def check(func, dist, args, alpha):
            # Not sure why 'ncf' and 'beta' are failing
# frechet has different len(args) than distfunc.numargs
if dist in self.skip + ['frechet']:
raise SkipTest("%s fit known to fail" % dist)
distfunc = getattr(stats, dist)
with np.errstate(all='ignore'):
res = distfunc.rvs(*args, **{'size':200})
vals = distfunc.fit(res,floc=0)
vals2 = distfunc.fit(res,fscale=1)
assert_(len(vals) == 2+len(args))
assert_(vals[-2] == 0)
assert_(vals2[-1] == 1)
assert_(len(vals2) == 2+len(args))
if len(args) > 0:
vals3 = distfunc.fit(res, f0=args[0])
assert_(len(vals3) == 2+len(args))
assert_(vals3[0] == args[0])
if len(args) > 1:
vals4 = distfunc.fit(res, f1=args[1])
assert_(len(vals4) == 2+len(args))
assert_(vals4[1] == args[1])
if len(args) > 2:
vals5 = distfunc.fit(res, f2=args[2])
assert_(len(vals5) == 2+len(args))
assert_(vals5[2] == args[2])
for func, dist, args, alpha in test_all_distributions():
yield check, func, dist, args, alpha
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
np.random.seed(12345)
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[0.25888672, 0, 20], atol=1e-5)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0,0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_fshapes(self):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=1234)
res_1 = stats.beta.fit(x, f0=3.)
res_2 = stats.beta.fit(x, fa=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4.)
res_4 = stats.beta.fit(x, fb=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100)
aa, ll, ss = stats.gamma.fit(data, fa=a)
assert_equal(aa, a)
def test_extra_params(self):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(TestCase):
# Test that a frozen distribution gives the same results as the original object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2,loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
rv = stats.genpareto(c=-0.1)
a, b = rv.dist.a, rv.dist.b
assert_equal([a, b], [0., 10.])
assert_equal([rv.a, rv.b], [0., 10.])
stats.genpareto.pdf(0, c=0.1) # this changes genpareto.b
assert_equal([rv.dist.a, rv.dist.b], [a, b])
assert_equal([rv.a, rv.b], [a, b])
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_expect(self):
# smoke test the expect method of the frozen distribution
# only take a gamma w/loc and scale and poisson with loc specified
def func(x):
return x
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
class TestExpect(TestCase):
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10,5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10,5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10,10), loc=5.,
scale=2.,lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10,10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2,2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
# rice.pdf(999, 0.74) was inf since special.i0 silently overflows
# check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values of c and for c=0, the results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4,7)[:,None], np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(TestCase):
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
q = [0.1, 0.1, 0.5, 0.9]
assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
mvsk = stats.rice.stats(0, moments='mvsk')
assert_(np.isfinite(mvsk).all())
# furthermore, pdf is continuous as b\to 0
# rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
# see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
b = 1e-8
assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
atol=b, rtol=0)
def test_rice_rvs(self):
rvs = stats.rice.rvs
assert_equal(rvs(b=3.).size, 1)
assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
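# A minimal sketch (ours, not part of the original suite): the small-b
# limit quoted in TestRice above -- as b -> 0 the Rice pdf tends to the
# Rayleigh density x*exp(-x**2/2).
def _rice_small_b_limit(x):
    import numpy as np
    x = np.asarray(x, dtype=float)
    return x * np.exp(-x**2 / 2.0)  # x*exp(-x^2/2) + O(b^2)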
class TestErlang(TestCase):
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
# Calling the fit method with `f0` set to an integer should
# *not* trigger a RuntimeWarning. It should return the same
# values as gamma.fit(...).
data = [0.5, 1.0, 2.0, 4.0]
result_erlang = stats.erlang.fit(data, f0=1)
result_gamma = stats.gamma.fit(data, f0=1)
assert_allclose(result_erlang, result_gamma, rtol=1e-3)
class TestExponWeib(TestCase):
def test_pdf_logpdf(self):
# Regression test for gh-3508.
x = 0.1
a = 1.0
c = 100.0
p = stats.exponweib.pdf(x, a, c)
logp = stats.exponweib.logpdf(x, a, c)
# Expected values were computed with mpmath.
assert_allclose([p, logp],
[1.0000000000000054e-97, -223.35075402042244])
def test_a_is_1(self):
# For issue gh-3508.
# Check that when a=1, the pdf and logpdf methods of exponweib are the
# same as those of weibull_min.
x = np.logspace(-4, -1, 4)
a = 1
c = 100
p = stats.exponweib.pdf(x, a, c)
expected = stats.weibull_min.pdf(x, c)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.weibull_min.logpdf(x, c)
assert_allclose(logp, expected)
def test_a_is_1_c_is_1(self):
# When a = 1 and c = 1, the distribution is exponential.
x = np.logspace(-8, 1, 10)
a = 1
c = 1
p = stats.exponweib.pdf(x, a, c)
expected = stats.expon.pdf(x)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.expon.logpdf(x)
assert_allclose(logp, expected)
class TestRdist(TestCase):
@dec.slow
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982),0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983),0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),0.98353464004309321,
decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
g = stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
# adjust to avoid nan with 0*log(0)
assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles non-positive lambdas.
x = np.linspace(-5.0, 5.0, 101)
olderr = np.seterr(divide='ignore')
try:
for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
p = stats.tukeylambda.pdf(x, lam)
assert_((p != 0.0).all())
assert_(~np.isnan(p).all())
lam = np.array([[-1.0], [0.0], [2.0]])
p = stats.tukeylambda.pdf(x, lam)
finally:
np.seterr(**olderr)
assert_(~np.isnan(p).all())
assert_((p[0] != 0.0).all())
assert_((p[1] != 0.0).all())
assert_((p[2] != 0.0).any())
assert_((p[2] == 0.0).any())
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
np.random.seed(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
olderr = np.seterr(divide='ignore')
try:
params = np.array(stats.lognorm.fit(x, floc=0.))
finally:
np.seterr(**olderr)
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
np.random.seed(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
np.random.seed(654321)
rvs = stats.cauchy.rvs(size=100)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
np.random.seed(1234)
x = np.random.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
# Some tests for the variance and kurtosis of the Tukey Lambda distribution.
# See test_tukeylambda_stats.py for more tests.
mv = stats.tukeylambda.stats(0, moments='mvsk')
# Known exact values:
expected = [0, np.pi**2/3, 0, 1.2]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(3.13, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(0.14, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see http://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See http://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
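# A minimal sketch (ours): cross-check the two skewness expressions derived
# in the docstring above; they should agree to near machine precision.
def _powerlaw_skewness_formulas_agree(a):
    from math import sqrt
    a = float(a)
    n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
    d = sqrt(a/((a+2)*(a+1)**2))**3
    gamma1_concise = -2.0*((a - 1)/(a + 3))*sqrt((a + 2)/a)
    return abs(n/d - gamma1_concise) < 1e-12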
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
# Regression tests for gh-4724.
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
try:
olderr = np.seterr(invalid='ignore')
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', RuntimeWarning)
stats.ksone.fit(d)
finally:
np.seterr(**olderr)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
olderr = np.seterr(divide='ignore')
try:
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
finally:
np.seterr(**olderr)
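# Sketch (ours): the precision exercised above is what one gets from the
# dedicated log-CDF routine in scipy.special, rather than log(norm.cdf(x)),
# which underflows to log(0) for large negative x.
def _norm_logcdf_direct(x):
    from scipy import special
    return special.log_ndtr(x)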
def test_levy_cdf_ppf():
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
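# Sketch (ours) of the closed form behind the mpmath reference values above:
# levy.cdf(x) = erfc(sqrt(1/(2*x))), evaluated here with scipy.special.
def _levy_cdf_closed_form(x):
    import numpy as np
    from scipy import special
    x = np.asarray(x, dtype=float)
    return special.erfc(np.sqrt(1.0/(2.0*x)))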
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this was working also before
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5)
stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
mv3 = stats.lognorm.stats([2, 2.4, -1]) # -1 is not a legal shape parameter
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
def _cdf(self, x, a):
# Different # of shape params from _pdf, to be able to check that
# inspection catches the inconsistency.
return 42 * a + x
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
def _pdf(self, x, a, b):
return a*x + b
def _cdf(self, x, a, b):
return 42 * a + x
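# A minimal standalone sketch (ours) of the subclassing pattern the tests
# below exercise: one explicit shape parameter 'a' and a _pdf override,
# normalized on [0, 1] for a > -1.
def _make_power_dist():
    from scipy import stats
    class _pow_gen(stats.rv_continuous):
        def _pdf(self, x, a):
            return (a + 1.0) * x**a
    return _pow_gen(a=0.0, b=1.0, name='pow_sketch', shapes='a')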
class TestSubclassingExplicitShapes(TestCase):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling" is
# ignoring *args and looking for ``extra_kwarg`` and using that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def test_shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(TestCase):
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
try:
_distr3_gen(name='dummy')
except TypeError:
pass
else:
raise AssertionError('TypeError not raised.')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100,1),1)
assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100,1),1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100,1),1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9,1),1)
assert_almost_equal(p, 9.0, decimal=15)
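# Sketch (ours) of the generic round-trip property the four accuracy tests
# above rely on: ppf inverts cdf and isf inverts sf, even far in the tails.
def _roundtrip(dist, x, *args):
    return (dist.ppf(dist.cdf(x, *args), *args),
            dist.isf(dist.sf(x, *args), *args))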
if __name__ == "__main__":
run_module_suite()
|
petebachant/scipy
|
scipy/stats/tests/test_distributions.py
|
Python
|
bsd-3-clause
| 87,809
|
[
"Gaussian"
] |
5854e94c676e2395aa18e0ebc02ca0e108850690b17a0e6199b16eb52925f759
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that encode the sequence of PSI module
calls for each of the *name* values of the energy(), optimize(),
response(), and frequency() functions. *name* can be assumed lowercase by here.
"""
from __future__ import print_function
from __future__ import absolute_import
import shutil
import os
import subprocess
import re
import numpy as np
from psi4 import core
from psi4 import extras
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver.p4util.exceptions import *
from psi4.driver.molutil import *
from .roa import *
from . import proc_util
from . import empirical_dispersion
from . import dft_functional
from . import mcscf
# never import driver, wrappers, or aliases into this file
# ATTN NEW ADDITIONS!
# consult http://psicode.org/psi4manual/master/proc_py.html
def select_mp2(name, **kwargs):
"""Function selecting the algorithm for a MP2 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/dfmp2/detci/fnocc
# MP2_TYPE exists largely for py-side reasoning, so must manage it
# here rather than passing to c-side unprepared for validation
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module == 'FNOCC':
func = run_fnocc
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'DFMP2']:
func = run_dfmp2
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference in ['RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'DFMP2']:
func = run_dfmp2
if func is None:
raise ManagedMethodError(['select_mp2', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
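# Usage sketch (ours; not taken from the Psi4 sources): every select_*
# router below follows the same contract. With probe=True it only checks
# that some module can serve the method under the current REFERENCE,
# *_TYPE and QC_MODULE options, raising ManagedMethodError otherwise;
# without probe it dispatches to the chosen run_* function:
#
#     select_mp2('mp2', probe=True)   # validate the route only
#     energy = select_mp2('mp2')      # dispatch and run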
def select_mp2_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP2 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/dfmp2
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc_gradient
elif module in ['', 'DFMP2']:
func = run_dfmp2_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp2_gradient', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2_property(name, **kwargs):
"""Function selecting the algorithm for a MP2 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only dfmp2 for now
func = None
if reference == 'RHF':
if mtd_type == 'DF':
#if module == 'OCC':
# func = run_dfocc_property
if module in ['', 'DFMP2']:
func = run_dfmp2_property
#elif reference == 'UHF':
# if mtd_type == 'DF':
# if module in ['', 'OCC']:
# func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_mp2_property', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2(name, **kwargs):
"""Function selecting the algorithm for an OMP2 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp2', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP2 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp2_gradient', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2_property(name, **kwargs):
"""Function selecting the algorithm for an OMP2 property call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP2_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_property
if func is None:
raise ManagedMethodError(['select_omp2_property', name, 'MP2_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp3(name, **kwargs):
"""Function selecting the algorithm for a MP3 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/fnocc/detci
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module == 'FNOCC':
func = run_fnocc
elif module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI': # no default for this case
func = run_detci
elif module in ['']:
core.print_out("""\nThis method is available inefficiently as a """
"""byproduct of a CISD computation.\n Add "set """
"""qc_module detci" to input to access this route.\n""")
if func is None:
raise ManagedMethodError(['select_mp3', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp3_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP3 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp3_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3(name, **kwargs):
"""Function selecting the algorithm for an OMP3 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp3', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp3_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP3 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp3_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2p5(name, **kwargs):
"""Function selecting the algorithm for a MP2.5 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_mp2p5', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp2p5_gradient(name, **kwargs):
"""Function selecting the algorithm for a MP2.5 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_mp2p5_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_omp2p5', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_omp2p5_gradient(name, **kwargs):
"""Function selecting the algorithm for an OMP2.5 gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_omp2p5_gradient', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_lccd(name, **kwargs):
"""Function selecting the algorithm for a LCCD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'OCC':
func = run_occ
elif module in ['', 'FNOCC']:
func = run_cepa
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_lccd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_lccd_gradient(name, **kwargs):
"""Function selecting the algorithm for a LCCD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_lccd_gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd(name, **kwargs):
"""Function selecting the algorithm for an OLCCD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_olccd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_olccd_gradient(name, **kwargs):
"""Function selecting the algorithm for an OLCCD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ
func = None
if reference in ['RHF', 'UHF', 'ROHF', 'RKS', 'UKS']:
if mtd_type == 'CONV':
if module in ['', 'OCC']:
func = run_occ_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
if func is None:
raise ManagedMethodError(['select_olccd_gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_fnoccsd(name, **kwargs):
"""Function selecting the algorithm for a FNO-CCSD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'FNOCC']:
func = run_fnocc
elif mtd_type == 'DF':
if module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module in ['', 'FNOCC']:
func = run_fnodfcc
if func is None:
raise ManagedMethodError(['select_fnoccsd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd(name, **kwargs):
"""Function selecting the algorithm for a CCSD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy/detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module == 'FNOCC':
func = run_fnocc
elif module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'CCENERGY']:
func = run_ccenergy
if func is None:
raise ManagedMethodError(['select_ccsd', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_gradient(name, **kwargs):
"""Function selecting the algorithm for a CCSD gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc_gradient
elif reference == 'UHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
if func is None:
raise ManagedMethodError(['select_ccsd_gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_fnoccsd_t_(name, **kwargs):
"""Function selecting the algorithm for a FNO-CCSD(T) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'FNOCC']:
func = run_fnocc
elif mtd_type == 'DF':
if module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module in ['', 'FNOCC']:
func = run_fnodfcc
if func is None:
raise ManagedMethodError(['select_fnoccsd_t_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_t_(name, **kwargs):
"""Function selecting the algorithm for a CCSD(T) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'FNOCC':
func = run_fnocc
elif module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif mtd_type == 'CD':
if module == 'OCC':
func = run_dfocc
elif module in ['', 'FNOCC']:
func = run_fnodfcc
elif reference in ['UHF', 'ROHF']:
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
if func is None:
raise ManagedMethodError(['select_ccsd_t_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_t__gradient(name, **kwargs):
"""Function selecting the algorithm for a CCSD(T) gradient call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only ccenergy
func = None
if reference in ['RHF', 'UHF']:
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy_gradient
if func is None:
raise ManagedMethodError(['select_ccsd_t__gradient', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_ccsd_at_(name, **kwargs):
"""Function selecting the algorithm for a CCSD(AT) energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CC_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only [df]occ/ccenergy
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module in ['', 'CCENERGY']:
func = run_ccenergy
elif mtd_type == 'DF':
if module in ['', 'OCC']:
func = run_dfocc
elif mtd_type == 'CD':
if module in ['', 'OCC']:
func = run_dfocc
if func is None:
raise ManagedMethodError(['select_ccsd_at_', name, 'CC_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_cisd(name, **kwargs):
"""Function selecting the algorithm for a CISD energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('CI_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'FNOCC']:
func = run_cepa
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module in ['', 'DETCI']:
func = run_detci
if func is None:
raise ManagedMethodError(['select_cisd', name, 'CI_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def select_mp4(name, **kwargs):
"""Function selecting the algorithm for a MP4 energy call
and directing to specified or best-performance default modules.
"""
reference = core.get_option('SCF', 'REFERENCE')
mtd_type = core.get_global_option('MP_TYPE')
module = core.get_global_option('QC_MODULE')
# Considering only detci/fnocc
func = None
if reference == 'RHF':
if mtd_type == 'CONV':
if module == 'DETCI':
func = run_detci
elif module in ['', 'FNOCC']:
func = run_fnocc
elif reference == 'ROHF':
if mtd_type == 'CONV':
if module == 'DETCI': # no default for this case
func = run_detci
elif module in ['']:
core.print_out("""\nThis method is available inefficiently as a """
"""byproduct of a CISDT computation.\n Add "set """
"""qc_module detci" to input to access this route.\n""")
if func is None:
raise ManagedMethodError(['select_mp4', name, 'MP_TYPE', mtd_type, reference, module])
if kwargs.pop('probe', False):
return
else:
return func(name, **kwargs)
def scf_wavefunction_factory(reference, ref_wfn, functional=None):
"""Builds the correct wavefunction from the provided information
"""
if core.has_option_changed("SCF", "DFT_DISPERSION_PARAMETERS"):
modified_disp_params = core.get_option("SCF", "DFT_DISPERSION_PARAMETERS")
else:
modified_disp_params = None
# Figure out functional
if functional is None:
superfunc, disp_type = dft_functional.build_superfunctional(core.get_option("SCF", "DFT_FUNCTIONAL"))
elif isinstance(functional, core.SuperFunctional):
superfunc = functional
disp_type = False
elif isinstance(functional, (str, unicode)):
superfunc, disp_type = dft_functional.build_superfunctional(functional)
else:
raise ValidationError("Functional %s is not understood" % str(functional))
# Build the wavefunction
core.prepare_options_for_module("SCF")
if reference in ["RHF", "RKS"]:
wfn = core.RHF(ref_wfn, superfunc)
elif reference == "ROHF":
wfn = core.ROHF(ref_wfn, superfunc)
elif reference in ["UHF", "UKS"]:
wfn = core.UHF(ref_wfn, superfunc)
elif reference == "CUHF":
wfn = core.CUHF(ref_wfn, superfunc)
else:
raise ValidationError("SCF: Unknown reference (%s) when building the Wavefunction." % reference)
if disp_type:
wfn._disp_functor = empirical_dispersion.EmpericalDispersion(disp_type[0], disp_type[1],
tuple_params = modified_disp_params)
wfn._disp_functor.print_out()
# Set the multitude of SAD basis sets
if (core.get_option("SCF", "SCF_TYPE") == "DF") or \
(core.get_option("SCF", "DF_SCF_GUESS") and (core.get_option("SCF", "SCF_TYPE") == "DIRECT")):
aux_basis = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=wfn.basisset().has_puream())
wfn.set_basisset("DF_BASIS_SCF", aux_basis)
if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
decon_basis = core.BasisSet.build(wfn.molecule(), "BASIS_RELATIVISTIC",
core.get_option("SCF", "BASIS_RELATIVISTIC"),
"DECON", core.get_global_option('BASIS'),
puream=wfn.basisset().has_puream())
wfn.set_basisset("BASIS_RELATIVISTIC", decon_basis)
if (core.get_option("SCF", "GUESS") == "SAD"):
sad_basis_list = core.BasisSet.build(wfn.molecule(), "ORBITAL",
core.get_global_option("BASIS"),
puream=wfn.basisset().has_puream(),
return_atomlist=True)
wfn.set_sad_basissets(sad_basis_list)
if (core.get_option("SCF", "SAD_SCF_TYPE") == "DF"):
sad_fitting_list = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SAD",
core.get_option("SCF", "DF_BASIS_SAD"),
puream=wfn.basisset().has_puream(),
return_atomlist=True)
wfn.set_sad_fitting_basissets(sad_fitting_list)
return wfn
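# Usage sketch (ours), mirroring how scf_helper below invokes the factory:
#
#     base = core.Wavefunction.build(mol, core.get_global_option('BASIS'))
#     wfn = scf_wavefunction_factory(core.get_option('SCF', 'REFERENCE'), base)
#
# The returned RHF/ROHF/UHF/CUHF object carries any dispersion functor and
# auxiliary basis sets set up above.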
def scf_helper(name, **kwargs):
"""Function serving as helper to SCF, choosing whether to cast
up or just run SCF with a standard guess. This preserves
previous SCF options set by other procedures (e.g., SAPT
output file types for SCF).
"""
optstash = p4util.OptionsState(
['PUREAM'],
['BASIS'],
['QMEFP'],
['DF_BASIS_SCF'],
['SCF', 'GUESS'],
['SCF', 'DF_INTS_IO'],
['SCF', 'SCF_TYPE'] # Hack: scope gets changed internally with the Andy trick
)
optstash2 = p4util.OptionsState(
['BASIS'],
['DF_BASIS_SCF'],
['SCF', 'SCF_TYPE'],
['SCF', 'DF_INTS_IO'])
# Grab a few kwargs
use_c1 = kwargs.get('use_c1', False)
scf_molecule = kwargs.get('molecule', core.get_active_molecule())
read_orbitals = core.get_option('SCF', 'GUESS') == "READ"
ref_wfn = kwargs.pop('ref_wfn', None)
if ref_wfn is not None:
raise Exception("Cannot supply a SCF wavefunction a ref_wfn.")
# Second-order SCF requires non-symmetric density matrix support
if core.get_option('SCF', 'SOSCF'):
proc_util.check_non_symmetric_jk_density("Second-order SCF")
# sort out cast_up settings. no need to stash these since only read, never reset
cast = False
if core.has_option_changed('SCF', 'BASIS_GUESS'):
cast = core.get_option('SCF', 'BASIS_GUESS')
if p4util.yes.match(str(cast)):
cast = True
elif p4util.no.match(str(cast)):
cast = False
if cast:
# A user can set "BASIS_GUESS" to True, in which case we default to 3-21G
if cast is True:
guessbasis = '3-21G'
else:
guessbasis = cast
core.set_global_option('BASIS', guessbasis)
castdf = core.get_option('SCF', 'SCF_TYPE') == 'DF'
if core.has_option_changed('SCF', 'DF_BASIS_GUESS'):
castdf = core.get_option('SCF', 'DF_BASIS_GUESS')
if p4util.yes.match(str(castdf)):
castdf = True
elif p4util.no.match(str(castdf)):
castdf = False
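# (Summary comment, ours.) The guess options are now normalized: `cast` is
# either True (use the default 3-21G guess basis) or an explicit basis-set
# name; `castdf` is False, True (use the default DF_BASIS_SCF fitting
# basis), or an explicit fitting-basis name handled just below.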
if castdf:
core.set_local_option('SCF', 'SCF_TYPE', 'DF')
core.set_local_option('SCF', 'DF_INTS_IO', 'none')
# Figure out the fitting basis set
if castdf is True:
core.set_global_option('DF_BASIS_SCF', '')
            elif isinstance(castdf, str):
core.set_global_option('DF_BASIS_SCF', castdf)
else:
raise ValidationError("Unexpected castdf option (%s)." % castdf)
# Switch to the guess namespace
namespace = core.IO.get_default_namespace()
guesspace = namespace + '.guess'
if namespace == '':
guesspace = 'guess'
core.IO.set_default_namespace(guesspace)
# Print some info about the guess
core.print_out('\n')
p4util.banner('Guess SCF, %s Basis' % (guessbasis))
core.print_out('\n')
# sort out broken_symmetry settings.
if 'brokensymmetry' in kwargs:
multp = scf_molecule.multiplicity()
if multp != 1:
raise ValidationError('Broken symmetry is only for singlets.')
if core.get_option('SCF', 'REFERENCE') not in ['UHF', 'UKS']:
raise ValidationError("""You must specify 'set reference uhf' to use broken symmetry.""")
do_broken = True
else:
do_broken = False
if cast and read_orbitals:
raise ValidationError("""Detected options to both cast and read orbitals""")
if cast and do_broken:
raise ValidationError("""Detected options to both cast and perform a broken symmetry computation""")
# broken set-up
if do_broken:
raise ValidationError("""Broken symmetry computations are not currently enabled.""")
scf_molecule.set_multiplicity(3)
core.print_out('\n')
p4util.banner(' Computing high-spin triplet guess ')
core.print_out('\n')
# If we force c1 copy the active molecule
if use_c1:
scf_molecule.update_geometry()
if scf_molecule.schoenflies_symbol() != 'c1':
core.print_out(""" A requested method does not make use of molecular symmetry: """
"""further calculations in C1 point group.\n""")
scf_molecule = scf_molecule.clone()
scf_molecule.reset_point_group('c1')
scf_molecule.fix_orientation(True)
scf_molecule.fix_com(True)
scf_molecule.update_geometry()
# If GUESS is auto guess what it should be
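    # SAD (superposition of atomic densities) suits closed-shell systems with
    # more than one atom; GWH (generalized Wolfsberg-Helmholz) is the fallback
    # for open shells; otherwise the bare core Hamiltonian is diagonalized.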
if core.get_option('SCF', 'GUESS') == "AUTO":
if (core.get_option('SCF', 'REFERENCE') in ['RHF', 'RKS']) and \
((scf_molecule.natom() > 1) or core.get_option('SCF', 'SAD_FRAC_OCC')):
core.set_local_option('SCF', 'GUESS', 'SAD')
elif core.get_option('SCF', 'REFERENCE') in ['ROHF', 'ROKS', 'UHF', 'UKS']:
core.set_local_option('SCF', 'GUESS', 'GWH')
else:
core.set_local_option('SCF', 'GUESS', 'CORE')
# the FIRST scf call
if cast or do_broken:
# Cast or broken are special cases
base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
ref_wfn = scf_wavefunction_factory(core.get_option('SCF', 'REFERENCE'), base_wfn)
core.set_legacy_wavefunction(ref_wfn)
# Compute dftd3
if "_disp_functor" in dir(ref_wfn):
disp_energy = ref_wfn._disp_functor.compute_energy(ref_wfn.molecule())
            ref_wfn.set_variable("-D Energy", disp_energy)
ref_wfn.compute_energy()
# broken clean-up
if do_broken:
raise ValidationError("Broken Symmetry computations are temporarily disabled.")
scf_molecule.set_multiplicity(1)
core.set_local_option('SCF', 'GUESS', 'READ')
core.print_out('\n')
p4util.banner(' Computing broken symmetry solution from high-spin triplet guess ')
core.print_out('\n')
# cast clean-up
if cast:
# Move files to proper namespace
core.IO.change_file_namespace(180, guesspace, namespace)
core.IO.set_default_namespace(namespace)
# Set to read and project, and reset bases to final ones
optstash2.restore()
core.set_local_option('SCF', 'GUESS', 'READ')
# Print the banner for the standard operation
core.print_out('\n')
p4util.banner(name.upper())
core.print_out('\n')
# EFP preparation
efp = core.get_active_efp()
if efp.nfragments() > 0:
core.set_legacy_molecule(scf_molecule)
core.set_global_option('QMEFP', True) # apt to go haywire if set locally to efp
core.efp_set_options()
efp.set_qm_atoms()
efp.print_out()
# the SECOND scf call
base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
scf_wfn = scf_wavefunction_factory(core.get_option('SCF', 'REFERENCE'), base_wfn)
core.set_legacy_wavefunction(scf_wfn)
read_filename = core.get_writer_file_prefix(scf_molecule.name()) + ".180.npz"
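    # Orbitals are serialized as a NumPy .npz archive ("file 180") next to the
    # output file; see the matching np.savez call at the end of this function
    # for the stored fields.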
if (core.get_option('SCF', 'GUESS') == 'READ') and os.path.isfile(read_filename):
data = np.load(read_filename)
Ca_occ = core.Matrix.np_read(data, "Ca_occ")
Cb_occ = core.Matrix.np_read(data, "Cb_occ")
symmetry = str(data["symmetry"])
basis_name = str(data["BasisSet"])
if symmetry != scf_molecule.schoenflies_symbol():
raise ValidationError("Cannot compute projection of different symmetries.")
if basis_name == scf_wfn.basisset().name():
core.print_out(" Reading orbitals from file 180, no projection.\n\n")
scf_wfn.guess_Ca(Ca_occ)
scf_wfn.guess_Cb(Cb_occ)
else:
core.print_out(" Reading orbitals from file 180, projecting to new basis.\n\n")
puream = int(data["BasisSet PUREAM"])
if ".gbs" in basis_name:
basis_name = basis_name.split('/')[-1].replace('.gbs', '')
old_basis = core.BasisSet.build(scf_molecule, "ORBITAL", basis_name, puream=puream)
core.print_out(" Computing basis projection from %s to %s\n\n" % (basis_name, base_wfn.basisset().name()))
nalphapi = core.Dimension.from_list(data["nalphapi"])
nbetapi = core.Dimension.from_list(data["nbetapi"])
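            # Project only the occupied blocks into the new basis; the
            # per-irrep dimensions were recorded when the file was written.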
pCa = scf_wfn.basis_projection(Ca_occ, nalphapi, old_basis, base_wfn.basisset())
pCb = scf_wfn.basis_projection(Cb_occ, nbetapi, old_basis, base_wfn.basisset())
scf_wfn.guess_Ca(pCa)
scf_wfn.guess_Cb(pCb)
# Strip off headers to only get R, RO, U, CU
old_ref = str(data["reference"]).replace("KS", "").replace("HF", "")
new_ref = core.get_option('SCF', 'REFERENCE').replace("KS", "").replace("HF", "")
if old_ref != new_ref:
scf_wfn.reset_occ(True)
elif (core.get_option('SCF', 'GUESS') == 'READ') and not os.path.isfile(read_filename):
core.print_out(" Unable to find file 180, defaulting to SAD guess.\n")
core.set_local_option('SCF', 'GUESS', 'SAD')
if cast:
core.print_out("\n Computing basis projection from %s to %s\n\n" % (ref_wfn.basisset().name(), base_wfn.basisset().name()))
pCa = ref_wfn.basis_projection(ref_wfn.Ca(), ref_wfn.nalphapi(), ref_wfn.basisset(), scf_wfn.basisset())
pCb = ref_wfn.basis_projection(ref_wfn.Cb(), ref_wfn.nbetapi(), ref_wfn.basisset(), scf_wfn.basisset())
scf_wfn.guess_Ca(pCa)
scf_wfn.guess_Cb(pCb)
# Print basis set info
if core.get_option("SCF", "PRINT_BASIS"):
scf_wfn.basisset().print_detail_out()
# Compute dftd3
if "_disp_functor" in dir(scf_wfn):
disp_energy = scf_wfn._disp_functor.compute_energy(scf_wfn.molecule())
scf_wfn.set_variable("-D Energy", disp_energy)
e_scf = scf_wfn.compute_energy()
core.set_variable("SCF TOTAL ENERGY", e_scf)
core.set_variable("CURRENT ENERGY", e_scf)
core.set_variable("CURRENT REFERENCE ENERGY", e_scf)
# We always would like to print a little dipole information
if kwargs.get('scf_do_dipole', True):
oeprop = core.OEProp(scf_wfn)
oeprop.set_title("SCF")
oeprop.add("DIPOLE")
oeprop.compute()
core.set_variable("CURRENT DIPOLE X", core.get_variable("SCF DIPOLE X"))
core.set_variable("CURRENT DIPOLE Y", core.get_variable("SCF DIPOLE Y"))
core.set_variable("CURRENT DIPOLE Z", core.get_variable("SCF DIPOLE Z"))
# Write out MO's
if core.get_option("SCF", "PRINT_MOS"):
mowriter = core.MOWriter(scf_wfn)
mowriter.write()
# Write out a molden file
if core.get_option("SCF", "MOLDEN_WRITE"):
filename = core.get_writer_file_prefix(scf_molecule.name()) + ".molden"
dovirt = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))
occa = scf_wfn.occupation_a()
        occb = scf_wfn.occupation_b()
mw = core.MoldenWriter(scf_wfn)
mw.write(filename, scf_wfn.Ca(), scf_wfn.Cb(), scf_wfn.epsilon_a(),
scf_wfn.epsilon_b(), scf_wfn.occupation_a(),
scf_wfn.occupation_b(), dovirt)
# Write out orbitals and basis
filename = core.get_writer_file_prefix(scf_molecule.name()) + ".180.npz"
data = {}
data.update(scf_wfn.Ca().np_write(None, prefix="Ca"))
data.update(scf_wfn.Cb().np_write(None, prefix="Cb"))
Ca_occ = scf_wfn.Ca_subset("SO", "OCC")
data.update(Ca_occ.np_write(None, prefix="Ca_occ"))
Cb_occ = scf_wfn.Cb_subset("SO", "OCC")
data.update(Cb_occ.np_write(None, prefix="Cb_occ"))
data["reference"] = core.get_option('SCF', 'REFERENCE')
data["nsoccpi"] = scf_wfn.soccpi().to_tuple()
data["ndoccpi"] = scf_wfn.doccpi().to_tuple()
data["nalphapi"] = scf_wfn.nalphapi().to_tuple()
data["nbetapi"] = scf_wfn.nbetapi().to_tuple()
data["symmetry"] = scf_molecule.schoenflies_symbol()
data["BasisSet"] = scf_wfn.basisset().name()
data["BasisSet PUREAM"] = scf_wfn.basisset().has_puream()
np.savez(filename, **data)
extras.register_numpy_file(filename)
optstash.restore()
return scf_wfn
def run_dcft(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density cumulant functional theory calculation.
"""
if (core.get_global_option('FREEZE_CORE') == 'TRUE'):
raise ValidationError('Frozen core is not available for DCFT.')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
if (core.get_global_option("DCFT_TYPE") == "DF"):
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_DCFT",
core.get_global_option("DF_BASIS_DCFT"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_DCFT", aux_basis)
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
dcft_wfn = core.dcft(ref_wfn)
return dcft_wfn
def run_dcft_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
DCFT gradient calculation.
"""
optstash = p4util.OptionsState(
['GLOBALS', 'DERTYPE'])
core.set_global_option('DERTYPE', 'FIRST')
dcft_wfn = run_dcft(name, **kwargs)
derivobj = core.Deriv(dcft_wfn)
derivobj.set_tpdm_presorted(True)
grad = derivobj.compute()
dcft_wfn.set_gradient(grad)
optstash.restore()
return dcft_wfn
def run_dfocc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted or Cholesky-decomposed
(non-)orbital-optimized MPN or CC computation.
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'],
['SCF', 'DF_INTS_IO'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'DO_SCS'],
['DFOCC', 'DO_SOS'],
['DFOCC', 'READ_SCF_3INDEX'],
['DFOCC', 'CHOLESKY'],
['DFOCC', 'CC_LAMBDA'])
def set_cholesky_from(mtd_type):
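        # Map the DF/CD method-type keyword onto DFOCC's CHOLESKY flag and,
        # if the user has not chosen an SCF algorithm, a matching SCF_TYPE.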
type_val = core.get_global_option(mtd_type)
if type_val == 'DF':
core.set_local_option('DFOCC', 'CHOLESKY', 'FALSE')
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
elif type_val == 'CD':
core.set_local_option('DFOCC', 'CHOLESKY', 'TRUE')
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'CD')
core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
if core.get_option('SCF', 'SCF_TYPE') != 'CD':
core.set_local_option('DFOCC', 'READ_SCF_3INDEX', 'FALSE')
else:
raise ValidationError("""Invalid type '%s' for DFOCC""" % type_val)
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
set_cholesky_from('MP2_TYPE')
elif name in ['mp2.5', 'omp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
set_cholesky_from('MP_TYPE')
elif name in ['mp3', 'omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
set_cholesky_from('MP_TYPE')
elif name in ['lccd', 'olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
set_cholesky_from('CC_TYPE')
elif name == 'ccd':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCD')
set_cholesky_from('CC_TYPE')
elif name == 'ccsd':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD')
set_cholesky_from('CC_TYPE')
elif name == 'ccsd(t)':
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(T)')
set_cholesky_from('CC_TYPE')
elif name == 'ccsd(at)':
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(AT)')
set_cholesky_from('CC_TYPE')
elif name == 'dfocc':
pass
else:
raise ValidationError('Unidentified method %s' % (name))
# conventional vs. optimized orbitals
if name in ['mp2', 'mp2.5', 'mp3', 'lccd',
'ccd', 'ccsd', 'ccsd(t)', 'ccsd(at)']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp2.5', 'omp3', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
if not core.get_local_option("DFOCC", "CHOLESKY"):
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
optstash.restore()
return dfocc_wfn
def run_dfocc_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a density-fitted (non-)orbital-optimized MPN or CC gradient computation.
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'],
['SCF', 'DF_INTS_IO'],
['REFERENCE'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'CC_LAMBDA'],
['GLOBALS', 'DERTYPE'])
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
if core.get_option('SCF', 'SCF_TYPE') != 'DF':
raise ValidationError('DFOCC gradients need DF-HF reference, for now.')
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
elif name in ['mp2.5', 'omp2.5']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
elif name in ['mp3', 'omp3']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
elif name in ['lccd', 'olccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
elif name in ['ccd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCD')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
elif name in ['ccsd']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD')
core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
else:
raise ValidationError('Unidentified method %s' % (name))
if name in ['mp2', 'mp2.5', 'mp3', 'lccd', 'ccd', 'ccsd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'omp2.5', 'omp3', 'olccd']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_global_option('DERTYPE', 'FIRST')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
optstash.restore()
return dfocc_wfn
def run_dfocc_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a density-fitted (non-)orbital-optimized MP2 property computation.
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'],
['SCF', 'DF_INTS_IO'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'OEPROP'])
if name in ['mp2', 'omp2']:
core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
else:
        raise ValidationError('Unidentified method %s' % (name))
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
if core.get_option('SCF', 'SCF_TYPE') != 'DF':
raise ValidationError('DFOCC gradients need DF-HF reference, for now.')
if name in ['mp2']:
core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2']:
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'OEPROP', 'TRUE')
core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
dfocc_wfn = core.dfocc(ref_wfn)
optstash.restore()
return dfocc_wfn
def run_qchf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a density-fitted quadratically-convergent HF (QCHF) computation.
"""
optstash = p4util.OptionsState(
['SCF', 'DF_INTS_IO'],
['DF_BASIS_SCF'],
['DIE_IF_NOT_CONVERGED'],
['MAXITER'],
['DFOCC', 'ORB_OPT'],
['DFOCC', 'WFN_TYPE'],
['DFOCC', 'QCHF'],
['DFOCC', 'E_CONVERGENCE'])
core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
core.set_local_option('DFOCC', 'WFN_TYPE', 'QCHF')
core.set_local_option('DFOCC', 'QCHF', 'TRUE')
core.set_local_option('DFOCC', 'E_CONVERGENCE', 8)
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
core.set_local_option('SCF', 'DIE_IF_NOT_CONVERGED', 'FALSE')
core.set_local_option('SCF', 'MAXITER', 1)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" QCHF does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
    dfocc_wfn = core.dfocc(ref_wfn)
    optstash.restore()
    return dfocc_wfn
def run_occ(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a conventional integral (O)MPN computation
"""
optstash = p4util.OptionsState(
['OCC', 'SCS_TYPE'],
['OCC', 'DO_SCS'],
['OCC', 'SOS_TYPE'],
['OCC', 'DO_SOS'],
['OCC', 'ORB_OPT'],
['OCC', 'WFN_TYPE'])
if name == 'mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
elif name == 'omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
elif name == 'scs-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'TRUE')
core.set_local_option('OCC', 'SCS_TYPE', 'SCS')
elif name == 'scs(n)-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'TRUE')
core.set_local_option('OCC', 'SCS_TYPE', 'SCSN')
elif name == 'scs-omp2-vdw':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'TRUE')
core.set_local_option('OCC', 'SCS_TYPE', 'SCSVDW')
elif name == 'sos-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SOS', 'TRUE')
core.set_local_option('OCC', 'SOS_TYPE', 'SOS')
elif name == 'sos-pi-omp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SOS', 'TRUE')
core.set_local_option('OCC', 'SOS_TYPE', 'SOSPI')
elif name == 'mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
elif name == 'omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
elif name == 'mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
elif name == 'omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
elif name == 'scs-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'TRUE')
core.set_local_option('OCC', 'SCS_TYPE', 'SCS')
elif name == 'scs(n)-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'TRUE')
core.set_local_option('OCC', 'SCS_TYPE', 'SCSN')
elif name == 'scs-omp3-vdw':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'TRUE')
core.set_local_option('OCC', 'SCS_TYPE', 'SCSVDW')
elif name == 'sos-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SOS', 'TRUE')
core.set_local_option('OCC', 'SOS_TYPE', 'SOS')
elif name == 'sos-pi-omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SOS', 'TRUE')
core.set_local_option('OCC', 'SOS_TYPE', 'SOSPI')
elif name == 'lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
elif name == 'olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
else:
raise ValidationError("""Invalid method %s""" % name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
occ_wfn = core.occ(ref_wfn)
optstash.restore()
return occ_wfn
def run_occ_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a conventional integral (O)MPN gradient computation.
"""
optstash = p4util.OptionsState(
['OCC', 'ORB_OPT'],
['OCC', 'WFN_TYPE'],
['OCC', 'DO_SCS'],
['OCC', 'DO_SOS'],
['GLOBALS', 'DERTYPE'])
if name == 'mp2':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name in ['omp2', 'conv-omp2']:
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'mp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'omp2.5':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP2.5')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'mp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'omp3':
core.set_local_option('OCC', 'WFN_TYPE', 'OMP3')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
elif name == 'lccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'FALSE')
elif name == 'olccd':
core.set_local_option('OCC', 'WFN_TYPE', 'OCEPA')
core.set_local_option('OCC', 'ORB_OPT', 'TRUE')
else:
raise ValidationError("""Invalid method %s""" % name)
core.set_global_option('DERTYPE', 'FIRST')
# locking out SCS through explicit keyword setting
# * so that current energy must match call
# * since grads not avail for scs
core.set_local_option('OCC', 'DO_SCS', 'FALSE')
core.set_local_option('OCC', 'DO_SOS', 'FALSE')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
ref_wfn.semicanonicalize()
occ_wfn = core.occ(ref_wfn)
derivobj = core.Deriv(occ_wfn)
grad = derivobj.compute()
occ_wfn.set_gradient(grad)
optstash.restore()
return occ_wfn
def run_scf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a self-consistent-field theory (HF & DFT) calculation.
"""
core.tstart() # Manage start and stop as there is no C wrapper
optstash = proc_util.scf_set_reference_local(name)
scf_wfn = scf_helper(name, **kwargs)
optstash.restore()
core.tstop()
return scf_wfn
def run_scf_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a SCF gradient calculation.
"""
optstash = proc_util.scf_set_reference_local(name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = run_scf(name, **kwargs)
if core.get_option('SCF', 'REFERENCE') in ['ROHF', 'CUHF']:
ref_wfn.semicanonicalize()
if "_disp_functor" in dir(ref_wfn):
disp_grad = ref_wfn._disp_functor.compute_gradient(ref_wfn.molecule())
ref_wfn.set_array("-D Gradient", disp_grad)
grad = core.scfgrad(ref_wfn)
ref_wfn.set_gradient(grad)
optstash.restore()
return ref_wfn
def run_scf_hessian(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an SCF hessian calculation.
"""
optstash = proc_util.scf_set_reference_local(name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = run_scf(name, **kwargs)
    badref = core.get_option('SCF', 'REFERENCE') in ['UHF', 'ROHF', 'CUHF', 'RKS', 'UKS']
    badint = core.get_option('SCF', 'SCF_TYPE') in ['CD', 'OUT_OF_CORE']
    if badref or badint:
        raise ValidationError("Only RHF Hessians are currently implemented; SCF_TYPE CD and OUT_OF_CORE are not supported.")
H = core.scfhess(ref_wfn)
ref_wfn.set_hessian(H)
# Temporary freq code. To be replaced with proper frequency analysis later...
import numpy as np
mol = ref_wfn.molecule()
natoms = mol.natom()
masses = np.zeros(natoms)
for atom in range(natoms):
masses[atom] = mol.mass(atom)
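    # Mass-weight the Hessian, H_mw = M^(-1/2) H M^(-1/2), where M is the
    # diagonal matrix of atomic masses repeated over x, y, z.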
    m = np.repeat(np.divide(1.0, np.sqrt(masses)), 3)
mwhess = np.einsum('i,ij,j->ij', m, H, m)
# Are we linear?
    if mol.get_full_point_group() in ["C_inf_v", "D_inf_h"]:
nexternal = 5
else:
nexternal = 6
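    # fcscale converts Hartree/(bohr^2 amu) to SI (J m^-2 kg^-1 = s^-2), so
    # the square roots of the eigenvalues below are angular frequencies.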
    fcscale = psi_hartree2J / (psi_bohr2m * psi_bohr2m * psi_amu2kg)
fc = fcscale * np.linalg.eigvalsh(mwhess)
# Sort by magnitude of the force constants, to project out rot/vib
ordering = np.argsort(np.abs(fc))
projected = fc[ordering][nexternal:]
freqs = np.sqrt(np.abs(projected))
freqs *= 1.0 / (2.0 * np.pi * psi_c * 100.0)
freqs[projected < 0] *= -1
freqs.sort()
freqvec = core.Vector.from_array(freqs)
ref_wfn.set_frequencies(freqvec)
# End of temporary freq hack. Remove me later!
# Write Hessian out. This probably needs a more permanent home, too.
# This is a drop-in replacement for the code that lives in findif
if core.get_option('FINDIF', 'HESSIAN_WRITE'):
molname = ref_wfn.molecule().name()
prefix = core.get_writer_file_prefix(molname)
with open(prefix+".hess", 'w') as fp:
fp.write("%5d%5d\n" % (natoms, 6*natoms))
for row in np.reshape(H, (-1, 3)):
fp.write("%20.10f%20.10f%20.10f\n" % tuple(row))
optstash.restore()
return ref_wfn
def run_libfock(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a calculation through libfock, namely RCPHF,
    RCIS, RTDHF, RCPKS, RTDA, and RTDDFT.
"""
if name == 'cphf':
core.set_global_option('MODULE', 'RCPHF')
if name == 'cis':
core.set_global_option('MODULE', 'RCIS')
if name == 'tdhf':
core.set_global_option('MODULE', 'RTDHF')
if name == 'cpks':
core.set_global_option('MODULE', 'RCPKS')
if name == 'tda':
core.set_global_option('MODULE', 'RTDA')
if name == 'tddft':
core.set_global_option('MODULE', 'RTDDFT')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
libfock_wfn = core.libfock(ref_wfn)
libfock_wfn.compute_energy()
return libfock_wfn
def run_mcscf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a multiconfigurational self-consistent-field calculation.
"""
# Make sure the molecule the user provided is the active one
mcscf_molecule = kwargs.get('molecule', core.get_active_molecule())
mcscf_molecule.update_geometry()
if 'ref_wfn' in kwargs:
raise ValidationError("It is not possible to pass run_mcscf a reference wavefunction")
new_wfn = core.Wavefunction.build(mcscf_molecule, core.get_global_option('BASIS'))
return core.mcscf(new_wfn)
def run_dfmp2_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DFMP2 gradient calculation.
"""
core.tstart()
optstash = p4util.OptionsState(
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['SCF_TYPE']) # yes, this really must be global, not local to SCF
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
if core.get_option('SCF', 'SCF_TYPE') != 'DF':
raise ValidationError('DF-MP2 gradients need DF-SCF reference.')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
dfmp2_wfn = core.dfmp2(ref_wfn)
grad = dfmp2_wfn.compute_gradient()
dfmp2_wfn.set_gradient(grad)
optstash.restore()
core.tstop()
return dfmp2_wfn
def run_ccenergy(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a CCSD, CC2, and CC3 calculation.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'])
if name == 'ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD')
core.set_local_option('CCSORT', 'WFN', 'CCSD')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD')
core.set_local_option('CCENERGY', 'WFN', 'CCSD')
elif name == 'ccsd(t)':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD_T')
core.set_local_option('CCSORT', 'WFN', 'CCSD_T')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD_T')
core.set_local_option('CCENERGY', 'WFN', 'CCSD_T')
elif name == 'ccsd(at)':
core.set_local_option('TRANSQT2', 'WFN', 'CCSD_AT')
core.set_local_option('CCSORT', 'WFN', 'CCSD_AT')
core.set_local_option('CCTRANSORT', 'WFN', 'CCSD_AT')
core.set_local_option('CCENERGY', 'WFN', 'CCSD_AT')
core.set_local_option('CCHBAR', 'WFN', 'CCSD_AT')
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD_AT')
elif name == 'cc2':
core.set_local_option('TRANSQT2', 'WFN', 'CC2')
core.set_local_option('CCSORT', 'WFN', 'CC2')
core.set_local_option('CCTRANSORT', 'WFN', 'CC2')
core.set_local_option('CCENERGY', 'WFN', 'CC2')
elif name == 'cc3':
core.set_local_option('TRANSQT2', 'WFN', 'CC3')
core.set_local_option('CCSORT', 'WFN', 'CC3')
core.set_local_option('CCTRANSORT', 'WFN', 'CC3')
core.set_local_option('CCENERGY', 'WFN', 'CC3')
elif name == 'eom-cc2':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC2')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCTRANSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC2')
elif name == 'eom-ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CCSD')
core.set_local_option('CCSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCTRANSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CCSD')
# Call a plain energy('ccenergy') and have full control over options, incl. wfn
elif name == 'ccenergy':
pass
# Bypass routine scf if user did something special to get it to converge
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if core.get_global_option("CC_TYPE") == "DF":
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
wfn.set_basisset("DF_BASIS_CC", aux_basis)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
# Obtain semicanonical orbitals
if (core.get_option('SCF', 'REFERENCE') == 'ROHF') and \
((name in ['ccsd(t)', 'ccsd(at)', 'cc2', 'cc3', 'eom-cc2', 'eom-cc3']) or
core.get_option('CCTRANSORT', 'SEMICANONICAL')):
ref_wfn.semicanonicalize()
if core.get_global_option('RUN_CCTRANSORT'):
core.cctransort(ref_wfn)
else:
try:
from psi4.driver.pasture import addins
addins.ccsort_transqt2(ref_wfn)
        except ImportError:
raise PastureRequiredError("RUN_CCTRANSORT")
ccwfn = core.ccenergy(ref_wfn)
if name == 'ccsd(at)':
core.cchbar(ref_wfn)
core.cclambda(ref_wfn)
optstash.restore()
return ccwfn
def run_ccenergy_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a CCSD and CCSD(T) gradient calculation.
"""
optstash = p4util.OptionsState(
['GLOBALS', 'DERTYPE'],
['CCLAMBDA', 'WFN'],
['CCDENSITY', 'WFN'])
core.set_global_option('DERTYPE', 'FIRST')
if core.get_global_option('FREEZE_CORE') == 'TRUE':
raise ValidationError('Frozen core is not available for the CC gradients.')
ccwfn = run_ccenergy(name, **kwargs)
if name == 'ccsd':
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD')
core.set_local_option('CCDENSITY', 'WFN', 'CCSD')
elif name == 'ccsd(t)':
core.set_local_option('CCLAMBDA', 'WFN', 'CCSD_T')
core.set_local_option('CCDENSITY', 'WFN', 'CCSD_T')
core.cchbar(ccwfn)
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
derivobj = core.Deriv(ccwfn)
grad = derivobj.compute()
del derivobj
ccwfn.set_gradient(grad)
optstash.restore()
return ccwfn
def run_bccd(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a Brueckner CCD calculation.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'])
if name == 'bccd':
core.set_local_option('TRANSQT2', 'WFN', 'BCCD')
core.set_local_option('CCSORT', 'WFN', 'BCCD')
core.set_local_option('CCTRANSORT', 'WFN', 'BCCD')
core.set_local_option('CCENERGY', 'WFN', 'BCCD')
elif name == 'bccd(t)':
core.set_local_option('TRANSQT2', 'WFN', 'BCCD_T')
core.set_local_option('CCSORT', 'WFN', 'BCCD_T')
core.set_local_option('CCENERGY', 'WFN', 'BCCD_T')
core.set_local_option('CCTRANSORT', 'WFN', 'BCCD_T')
core.set_local_option('CCTRIPLES', 'WFN', 'BCCD_T')
else:
raise ValidationError("proc.py:run_bccd name %s not recognized" % name)
# Bypass routine scf if user did something special to get it to converge
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Needed for (T).
if (core.get_option('SCF', 'REFERENCE') == 'ROHF'):
ref_wfn.semicanonicalize()
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
core.set_local_option('CCTRANSORT', 'DELETE_TEI', 'false')
bcc_iter_cnt = 0
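    # Brueckner orbitals are found iteratively: run CC, rotate the orbitals
    # to absorb the singles, and repeat until 'BRUECKNER CONVERGED' signals
    # self-consistency or BCCD_MAXITER is exhausted.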
if (core.get_global_option("RUN_CCTRANSORT")):
sort_func = core.cctransort
else:
try:
from psi4.driver.pasture import addins
core.set_local_option('TRANSQT2', 'DELETE_TEI', 'false')
sort_func = addins.ccsort_transqt2
        except ImportError:
raise PastureRequiredError("RUN_CCTRANSORT")
while True:
sort_func(ref_wfn)
ref_wfn = core.ccenergy(ref_wfn)
core.print_out('Brueckner convergence check: %s\n' % bool(core.get_variable('BRUECKNER CONVERGED')))
        if core.get_variable('BRUECKNER CONVERGED'):
break
if bcc_iter_cnt >= core.get_option('CCENERGY', 'BCCD_MAXITER'):
core.print_out("\n\nWarning! BCCD did not converge within the maximum number of iterations.")
core.print_out("You can increase the number of BCCD iterations by changing BCCD_MAXITER.\n\n")
break
bcc_iter_cnt += 1
if name == 'bccd(t)':
core.cctriples(ref_wfn)
optstash.restore()
return ref_wfn
def run_dft_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
DFT calculations. This is a simple alias to :py:func:`~proc.run_scf`
    since DFT properties are all handled through OEProp.
"""
optstash = proc_util.dft_set_reference_local(name)
properties = kwargs.pop('properties')
proc_util.oeprop_validator(properties)
    scf_wfn = run_scf(name, scf_do_dipole=False, **kwargs)
# Run OEProp
oe = core.OEProp(scf_wfn)
oe.set_title(name.upper())
for prop in properties:
oe.add(prop.upper())
oe.compute()
scf_wfn.set_oeprop(oe)
optstash.restore()
return scf_wfn
def run_scf_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
SCF calculations. This is a simple alias to :py:func:`~proc.run_scf`
    since SCF properties are all handled through OEProp.
"""
optstash = proc_util.scf_set_reference_local(name)
properties = kwargs.pop('properties')
proc_util.oeprop_validator(properties)
scf_wfn = run_scf(name, scf_do_dipole=False, **kwargs)
# Run OEProp
oe = core.OEProp(scf_wfn)
oe.set_title(name.upper())
for prop in properties:
oe.add(prop.upper())
oe.compute()
scf_wfn.set_oeprop(oe)
optstash.restore()
return scf_wfn
def run_cc_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
all CC property calculations.
"""
optstash = p4util.OptionsState(
['WFN'],
['DERTYPE'],
['ONEPDM'],
['PROPERTY'],
['CCLAMBDA', 'R_CONVERGENCE'],
['CCEOM', 'R_CONVERGENCE'],
['CCEOM', 'E_CONVERGENCE'])
oneel_properties = ['dipole', 'quadrupole']
twoel_properties = []
response_properties = ['polarizability', 'rotation', 'roa', 'roa_tensor']
excited_properties = ['oscillator_strength', 'rotational_strength']
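    # Sort each requested property into the bucket of the module that can
    # compute it; anything unrecognized is collected and reported below.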
one = []
two = []
response = []
excited = []
invalid = []
if 'properties' in kwargs:
properties = kwargs['properties']
for prop in properties:
if prop in oneel_properties:
one.append(prop)
elif prop in twoel_properties:
two.append(prop)
elif prop in response_properties:
response.append(prop)
elif prop in excited_properties:
excited.append(prop)
else:
invalid.append(prop)
else:
raise ValidationError("""The "properties" keyword is required with the property() function.""")
n_one = len(one)
n_two = len(two)
n_response = len(response)
n_excited = len(excited)
n_invalid = len(invalid)
if n_invalid > 0:
print("""The following properties are not currently supported: %s""" % invalid)
if n_excited > 0 and (name not in ['eom-ccsd', 'eom-cc2']):
raise ValidationError("""Excited state CC properties require EOM-CC2 or EOM-CCSD.""")
if (name in ['eom-ccsd', 'eom-cc2']) and n_response > 0:
raise ValidationError("""Cannot (yet) compute response properties for excited states.""")
if 'roa' in response:
# Perform distributed roa job
run_roa(name, **kwargs)
return # Don't do anything further
if (n_one > 0 or n_two > 0) and (n_response > 0):
print("""Computing both density- and response-based properties.""")
if name in ['ccsd', 'cc2', 'eom-ccsd', 'eom-cc2']:
this_name = name.upper().replace('-', '_')
core.set_global_option('WFN', this_name)
ccwfn = run_ccenergy(name, **kwargs)
core.set_global_option('WFN', this_name)
else:
raise ValidationError("""CC property name %s not recognized""" % name.upper())
# Need cchbar for everything
core.cchbar(ccwfn)
# Need ccdensity at this point only for density-based props
if n_one > 0 or n_two > 0:
if name == 'eom-ccsd':
core.set_global_option('WFN', 'EOM_CCSD')
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cceom(ccwfn)
elif name == 'eom-cc2':
core.set_global_option('WFN', 'EOM_CC2')
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cceom(ccwfn)
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
# Need ccresponse only for response-type props
if n_response > 0:
core.set_global_option('DERTYPE', 'RESPONSE')
core.cclambda(ccwfn)
for prop in response:
core.set_global_option('PROPERTY', prop)
core.ccresponse(ccwfn)
# Excited-state transition properties
if n_excited > 0:
if name == 'eom-ccsd':
core.set_global_option('WFN', 'EOM_CCSD')
elif name == 'eom-cc2':
core.set_global_option('WFN', 'EOM_CC2')
else:
raise ValidationError("""Unknown excited-state CC wave function.""")
core.set_global_option('DERTYPE', 'NONE')
core.set_global_option('ONEPDM', 'TRUE')
# Tight convergence unnecessary for transition properties
        core.set_local_option('CCLAMBDA', 'R_CONVERGENCE', 1e-4)
        core.set_local_option('CCEOM', 'R_CONVERGENCE', 1e-4)
        core.set_local_option('CCEOM', 'E_CONVERGENCE', 1e-5)
core.cceom(ccwfn)
core.cclambda(ccwfn)
core.ccdensity(ccwfn)
core.set_global_option('WFN', 'SCF')
core.revoke_global_option_changed('WFN')
core.set_global_option('DERTYPE', 'NONE')
core.revoke_global_option_changed('DERTYPE')
optstash.restore()
return ccwfn
def run_dfmp2_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DFMP2 property calculation.
"""
optstash = p4util.OptionsState(
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['ONEPDM'],
['OPDM_RELAX'],
['SCF_TYPE'])
core.set_global_option('ONEPDM', 'TRUE')
core.set_global_option('OPDM_RELAX', 'TRUE')
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF') # local set insufficient b/c SCF option read in DFMP2
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
if not core.get_option('SCF', 'SCF_TYPE') == 'DF':
raise ValidationError('DF-MP2 properties need DF-SCF reference.')
properties = kwargs.pop('properties')
proc_util.oeprop_validator(properties)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, scf_do_dipole=False, use_c1=True, **kwargs) # C1 certified
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
dfmp2_wfn = core.dfmp2(ref_wfn)
grad = dfmp2_wfn.compute_gradient()
if name == 'scs-mp2':
core.set_variable('CURRENT ENERGY', core.get_variable('SCS-MP2 TOTAL ENERGY'))
core.set_variable('CURRENT CORRELATION ENERGY', core.get_variable('SCS-MP2 CORRELATION ENERGY'))
elif name == 'mp2':
core.set_variable('CURRENT ENERGY', core.get_variable('MP2 TOTAL ENERGY'))
core.set_variable('CURRENT CORRELATION ENERGY', core.get_variable('MP2 CORRELATION ENERGY'))
# Run OEProp
oe = core.OEProp(dfmp2_wfn)
oe.set_title(name.upper())
for prop in properties:
oe.add(prop.upper())
oe.compute()
dfmp2_wfn.set_oeprop(oe)
optstash.restore()
return dfmp2_wfn
def run_detci_property(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a configuration interaction calculation, namely FCI,
CIn, MPn, and ZAPTn, computing properties.
"""
optstash = p4util.OptionsState(
['OPDM'],
['TDM'])
# Find valid properties
valid_transition = ['TRANSITION_DIPOLE', 'TRANSITION_QUADRUPOLE']
ci_prop = []
ci_trans = []
properties = kwargs.pop('properties')
for prop in properties:
if prop.upper() in valid_transition:
ci_trans.append(prop)
else:
ci_prop.append(prop)
proc_util.oeprop_validator(ci_prop)
core.set_global_option('OPDM', 'TRUE')
if len(ci_trans):
core.set_global_option('TDM', 'TRUE')
# Compute
if name in ['mcscf', 'rasscf', 'casscf']:
ciwfn = run_detcas(name, **kwargs)
else:
ciwfn = run_detci(name, **kwargs)
# All property names are just CI
if 'CI' in name.upper():
name = 'CI'
states = core.get_global_option('avg_states')
nroots = core.get_global_option('num_roots')
if len(states) != nroots:
states = range(nroots)
# Run OEProp
oe = core.OEProp(ciwfn)
oe.set_title(name.upper())
for prop in ci_prop:
oe.add(prop.upper())
# Compute "the" CI density
oe.compute()
ciwfn.set_oeprop(oe)
# If we have more than one root, compute all data
if nroots > 1:
core.print_out("\n ===> %s properties for all CI roots <=== \n\n" % name.upper())
for root in states:
oe.set_title("%s ROOT %d" % (name.upper(), root))
if ciwfn.same_a_b_dens():
oe.set_Da_mo(ciwfn.get_opdm(root, root, "A", True))
else:
oe.set_Da_mo(ciwfn.get_opdm(root, root, "A", True))
oe.set_Db_mo(ciwfn.get_opdm(root, root, "B", True))
oe.compute()
# Transition density matrices
if (nroots > 1) and len(ci_trans):
oe.clear()
for tprop in ci_trans:
oe.add(tprop.upper())
core.print_out("\n ===> %s properties for all CI transition density matrices <=== \n\n" % name.upper())
for root in states[1:]:
oe.set_title("%s ROOT %d -> ROOT %d" % (name.upper(), 0, root))
if ciwfn.same_a_b_dens():
oe.set_Da_mo(ciwfn.get_opdm(0, root, "A", True))
else:
oe.set_Da_mo(ciwfn.get_opdm(0, root, "A", True))
oe.set_Db_mo(ciwfn.get_opdm(0, root, "B", True))
oe.compute()
optstash.restore()
return ciwfn
def run_eom_cc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an EOM-CC calculation, namely EOM-CC2, EOM-CCSD, and EOM-CC3.
"""
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['CCSORT', 'WFN'],
['CCENERGY', 'WFN'],
['CCHBAR', 'WFN'],
['CCEOM', 'WFN'])
if name == 'eom-ccsd':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CCSD')
core.set_local_option('CCSORT', 'WFN', 'EOM_CCSD')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CCSD')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CCSD')
core.set_local_option('CCEOM', 'WFN', 'EOM_CCSD')
ref_wfn = run_ccenergy('ccsd', **kwargs)
elif name == 'eom-cc2':
user_ref = core.get_option('CCENERGY', 'REFERENCE')
if (user_ref != 'RHF') and (user_ref != 'UHF'):
raise ValidationError('Reference %s for EOM-CC2 is not available.' % user_ref)
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC2')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC2')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC2')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CC2')
core.set_local_option('CCEOM', 'WFN', 'EOM_CC2')
ref_wfn = run_ccenergy('cc2', **kwargs)
elif name == 'eom-cc3':
core.set_local_option('TRANSQT2', 'WFN', 'EOM_CC3')
core.set_local_option('CCSORT', 'WFN', 'EOM_CC3')
core.set_local_option('CCENERGY', 'WFN', 'EOM_CC3')
core.set_local_option('CCHBAR', 'WFN', 'EOM_CC3')
core.set_local_option('CCEOM', 'WFN', 'EOM_CC3')
ref_wfn = run_ccenergy('cc3', **kwargs)
core.cchbar(ref_wfn)
core.cceom(ref_wfn)
optstash.restore()
return ref_wfn
# TODO ask if all these cc modules not actually changing wfn
def run_eom_cc_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an EOM-CCSD gradient calculation.
"""
optstash = p4util.OptionsState(
['CCDENSITY', 'XI'],
['CCDENSITY', 'ZETA'],
['CCLAMBDA', 'ZETA'],
['DERTYPE'],
['CCDENSITY', 'WFN'],
['CCLAMBDA', 'WFN'])
core.set_global_option('DERTYPE', 'FIRST')
if name == 'eom-ccsd':
core.set_local_option('CCLAMBDA', 'WFN', 'EOM_CCSD')
core.set_local_option('CCDENSITY', 'WFN', 'EOM_CCSD')
ref_wfn = run_eom_cc(name, **kwargs)
else:
core.print_out('DGAS: proc.py:1599 hitting an undefined sequence')
core.clean()
raise ValueError('Hit a wall in proc.py:1599')
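    # Two passes over the density code: first with XI on to build the
    # excited-state contributions, then with ZETA on to solve the zeta
    # (amplitude-response) equations before assembling the gradient.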
core.set_local_option('CCLAMBDA', 'ZETA', 'FALSE')
core.set_local_option('CCDENSITY', 'ZETA', 'FALSE')
core.set_local_option('CCDENSITY', 'XI', 'TRUE')
core.cclambda(ref_wfn)
core.ccdensity(ref_wfn)
core.set_local_option('CCLAMBDA', 'ZETA', 'TRUE')
core.set_local_option('CCDENSITY', 'ZETA', 'TRUE')
core.set_local_option('CCDENSITY', 'XI', 'FALSE')
core.cclambda(ref_wfn)
core.ccdensity(ref_wfn)
derivobj = core.Deriv(ref_wfn)
grad = derivobj.compute()
ref_wfn.set_gradient(grad)
optstash.restore()
return ref_wfn
def run_adc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an algebraic diagrammatic construction calculation.
    .. caution:: Get rid of active molecule lines; should be handled in energy().
"""
if core.get_option('ADC', 'REFERENCE') != 'RHF':
raise ValidationError('ADC requires reference RHF')
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
return core.adc(ref_wfn)
def run_dft(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-functional-theory calculation.
"""
optstash = p4util.OptionsState(
['SCF', 'DFT_FUNCTIONAL'],
['SCF', 'REFERENCE'],
['SCF', 'SCF_TYPE'],
['DF_BASIS_MP2'],
['DFMP2', 'MP2_OS_SCALE'],
['DFMP2', 'MP2_SS_SCALE'])
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_local_option('SCF', 'SCF_TYPE', 'DF')
core.set_local_option('SCF', 'DFT_FUNCTIONAL', name)
user_ref = core.get_option('SCF', 'REFERENCE')
if (user_ref == 'RHF'):
core.set_local_option('SCF', 'REFERENCE', 'RKS')
elif (user_ref == 'UHF'):
core.set_local_option('SCF', 'REFERENCE', 'UKS')
elif (user_ref == 'ROHF'):
raise ValidationError('ROHF reference for DFT is not available.')
elif (user_ref == 'CUHF'):
raise ValidationError('CUHF reference for DFT is not available.')
scf_wfn = run_scf(name, **kwargs)
returnvalue = core.get_variable('CURRENT ENERGY')
for ssuper in dft_functional.superfunctional_list:
if ssuper.name().lower() == name:
dfun = ssuper
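    # Double hybrids add a scaled MP2-like correlation term to the SCF
    # energy, E = E_SCF + c_alpha * E_corr(MP2); SCS variants scale the
    # same- and opposite-spin pieces separately before applying c_alpha.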
if dfun.is_c_hybrid():
core.tstart()
aux_basis = core.BasisSet.build(scf_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'),
puream=-1)
scf_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
if dfun.is_c_scs_hybrid():
core.set_local_option('DFMP2', 'MP2_OS_SCALE', dfun.c_os_alpha())
core.set_local_option('DFMP2', 'MP2_SS_SCALE', dfun.c_ss_alpha())
dfmp2_wfn = core.dfmp2(scf_wfn)
dfmp2_wfn.compute_energy()
vdh = dfun.c_alpha() * core.get_variable('SCS-MP2 CORRELATION ENERGY')
else:
dfmp2_wfn = core.dfmp2(scf_wfn)
dfmp2_wfn.compute_energy()
vdh = dfun.c_alpha() * core.get_variable('MP2 CORRELATION ENERGY')
# TODO: delete these variables, since they don't mean what they look to mean?
# 'MP2 TOTAL ENERGY',
# 'MP2 CORRELATION ENERGY',
# 'MP2 SAME-SPIN CORRELATION ENERGY']
core.set_variable('DOUBLE-HYBRID CORRECTION ENERGY', vdh)
returnvalue += vdh
core.set_variable('DFT TOTAL ENERGY', returnvalue)
core.set_variable('CURRENT ENERGY', returnvalue)
core.print_out('\n\n')
core.print_out(' %s Energy Summary\n' % (name.upper()))
core.print_out(' -------------------------\n')
core.print_out(' DFT Reference Energy = %22.16lf\n' % (returnvalue - vdh))
core.print_out(' Scaled MP2 Correlation = %22.16lf\n' % (vdh))
core.print_out(' @Final double-hybrid DFT total energy = %22.16lf\n\n' % (returnvalue))
core.tstop()
optstash.restore()
return scf_wfn
def run_dft_gradient(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-functional-theory gradient calculation.
"""
optstash = p4util.OptionsState(
['SCF', 'DFT_FUNCTIONAL'],
['SCF', 'REFERENCE'],
['SCF', 'SCF_TYPE'])
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_local_option('SCF', 'SCF_TYPE', 'DF')
core.set_local_option('SCF', 'DFT_FUNCTIONAL', name.upper())
user_ref = core.get_option('SCF', 'REFERENCE')
if (user_ref == 'RHF'):
core.set_local_option('SCF', 'REFERENCE', 'RKS')
elif (user_ref == 'UHF'):
core.set_local_option('SCF', 'REFERENCE', 'UKS')
elif (user_ref == 'ROHF'):
raise ValidationError('ROHF reference for DFT is not available.')
elif (user_ref == 'CUHF'):
raise ValidationError('CUHF reference for DFT is not available.')
wfn = run_scf_gradient(name, **kwargs)
optstash.restore()
return wfn
def run_detci(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a configuration interaction calculation, namely FCI,
CIn, MPn, and ZAPTn.
"""
optstash = p4util.OptionsState(
['DETCI', 'WFN'],
['DETCI', 'MAX_NUM_VECS'],
['DETCI', 'MPN_ORDER_SAVE'],
['DETCI', 'MPN'],
['DETCI', 'FCI'],
['DETCI', 'EX_LEVEL'])
if core.get_option('DETCI', 'REFERENCE') not in ['RHF', 'ROHF']:
raise ValidationError('Reference %s for DETCI is not available.' %
core.get_option('DETCI', 'REFERENCE'))
if name == 'zapt':
core.set_local_option('DETCI', 'WFN', 'ZAPTN')
level = kwargs['level']
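        # By the Wigner (2n+1) rule, an order-'level' energy needs sigma
        # vectors only through roughly half that order, so maxnvect is
        # ceil((level + 1) / 2); the MPn branch below reuses this expression.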
maxnvect = int((level + 1) / 2) + (level + 1) % 2
core.set_local_option('DETCI', 'MAX_NUM_VECS', maxnvect)
if (level + 1) % 2:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 2)
else:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 1)
elif name in ['mp', 'mp2', 'mp3', 'mp4']:
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'MPN', 'TRUE')
if name == 'mp2':
level = 2
elif name == 'mp3':
level = 3
elif name == 'mp4':
level = 4
else:
level = kwargs['level']
maxnvect = int((level + 1) / 2) + (level + 1) % 2
core.set_local_option('DETCI', 'MAX_NUM_VECS', maxnvect)
if (level + 1) % 2:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 2)
else:
core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 1)
elif name == 'ccsd':
# untested
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'CC', 'TRUE')
core.set_local_option('DETCI', 'CC_EX_LEVEL', 2)
elif name == 'fci':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'FCI', 'TRUE')
elif name == 'cisd':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 2)
elif name == 'cisdt':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 3)
elif name == 'cisdtq':
core.set_local_option('DETCI', 'WFN', 'DETCI')
core.set_local_option('DETCI', 'EX_LEVEL', 4)
elif name == 'ci':
core.set_local_option('DETCI', 'WFN', 'DETCI')
level = kwargs['level']
core.set_local_option('DETCI', 'EX_LEVEL', level)
elif name == 'detci':
pass
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
ciwfn = core.detci(ref_wfn)
if core.get_global_option("DIPMOM") and ("mp" not in name.lower()):
# We always would like to print a little dipole information
oeprop = core.OEProp(ciwfn)
oeprop.set_title(name.upper())
oeprop.add("DIPOLE")
oeprop.compute()
ciwfn.set_oeprop(oeprop)
core.set_variable("CURRENT DIPOLE X", core.get_variable(name.upper() + " DIPOLE X"))
core.set_variable("CURRENT DIPOLE Y", core.get_variable(name.upper() + " DIPOLE Y"))
core.set_variable("CURRENT DIPOLE Z", core.get_variable(name.upper() + " DIPOLE Z"))
optstash.restore()
return ciwfn
def run_dfmp2(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a density-fitted MP2 calculation.
"""
core.tstart()
optstash = p4util.OptionsState(
['DF_BASIS_MP2'],
['SCF', 'SCF_TYPE'])
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_local_option('SCF', 'SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
core.print_out('\n')
p4util.banner('DFMP2')
core.print_out('\n')
if core.get_global_option('REFERENCE') == "ROHF":
ref_wfn.semicanonicalize()
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
core.get_option("DFMP2", "DF_BASIS_MP2"),
"RIFIT", core.get_global_option('BASIS'))
ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
dfmp2_wfn = core.dfmp2(ref_wfn)
dfmp2_wfn.compute_energy()
if name == 'scs-mp2':
core.set_variable('CURRENT ENERGY', core.get_variable('SCS-MP2 TOTAL ENERGY'))
core.set_variable('CURRENT CORRELATION ENERGY', core.get_variable('SCS-MP2 CORRELATION ENERGY'))
elif name == 'mp2':
core.set_variable('CURRENT ENERGY', core.get_variable('MP2 TOTAL ENERGY'))
core.set_variable('CURRENT CORRELATION ENERGY', core.get_variable('MP2 CORRELATION ENERGY'))
optstash.restore()
core.tstop()
return dfmp2_wfn
def run_dmrgscf(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an DMRG calculation.
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'],
['DMRG', 'DMRG_CASPT2_CALC'])
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
if 'CASPT2' in name.upper():
core.set_local_option("DMRG", "DMRG_CASPT2_CALC", True)
dmrg_wfn = core.dmrg(ref_wfn)
optstash.restore()
return dmrg_wfn
def run_dmrgci(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an DMRG calculation.
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'],
['DMRG', 'DMRG_SCF_MAX_ITER'])
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
core.set_local_option('DMRG', 'DMRG_SCF_MAX_ITER', 1)
dmrg_wfn = core.dmrg(ref_wfn)
optstash.restore()
return dmrg_wfn
def run_psimrcc(name, **kwargs):
"""Function encoding sequence of PSI module calls for a PSIMRCC computation
using a reference from the MCSCF module
"""
mcscf_wfn = run_mcscf(name, **kwargs)
psimrcc_e = core.psimrcc(mcscf_wfn)
return mcscf_wfn
def run_psimrcc_scf(name, **kwargs):
"""Function encoding sequence of PSI module calls for a PSIMRCC computation
using a reference from the SCF module
"""
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
psimrcc_e = core.psimrcc(ref_wfn)
return ref_wfn
def run_sapt(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a SAPT calculation of any level.
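    Illustrative usage (hypothetical input; sapt0 is one of the levels
    handled below):
    >>> energy('sapt0')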
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'])
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_local_option('SCF', 'SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_dimer.update_geometry() # make sure since mol from wfn, kwarg, or P::e
sapt_dimer.fix_orientation(True)
sapt_dimer.fix_com(True)
# Shifting to C1 so we need to copy the active molecule
if sapt_dimer.schoenflies_symbol() != 'c1':
core.print_out(' SAPT does not make use of molecular symmetry, further calculations in C1 point group.\n')
sapt_dimer = sapt_dimer.clone()
sapt_dimer.reset_point_group('c1')
sapt_dimer.fix_orientation(True)
sapt_dimer.fix_com(True)
sapt_dimer.update_geometry()
if (core.get_option('SCF', 'REFERENCE') != 'RHF') and (name.upper() != "SAPT0"):
raise ValidationError('Only SAPT0 supports a reference different from \"reference rhf\".')
nfrag = sapt_dimer.nfragments()
if nfrag != 2:
raise ValidationError('SAPT requires active molecule to have 2 fragments, not %s.' % (nfrag))
    do_delta_mp2 = name.endswith('dmp2')
sapt_basis = 'dimer'
if 'sapt_basis' in kwargs:
sapt_basis = kwargs.pop('sapt_basis')
sapt_basis = sapt_basis.lower()
if sapt_basis == 'dimer':
monomerA = sapt_dimer.extract_subsets(1, 2)
monomerA.set_name('monomerA')
monomerB = sapt_dimer.extract_subsets(2, 1)
monomerB.set_name('monomerB')
elif sapt_basis == 'monomer':
monomerA = sapt_dimer.extract_subsets(1)
monomerA.set_name('monomerA')
monomerB = sapt_dimer.extract_subsets(2)
monomerB.set_name('monomerB')
ri = core.get_option('SCF', 'SCF_TYPE')
df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
# inquire if above at all applies to dfmp2
core.IO.set_default_namespace('dimer')
core.print_out('\n')
p4util.banner('Dimer HF')
core.print_out('\n')
# Compute dimer wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.set_global_option('DF_INTS_IO', 'SAVE')
dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
if do_delta_mp2:
select_mp2(name, ref_wfn=dimer_wfn, **kwargs)
mp2_corl_interaction_e = core.get_variable('MP2 CORRELATION ENERGY')
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.set_global_option('DF_INTS_IO', 'LOAD')
# Compute Monomer A wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.IO.change_file_namespace(97, 'dimer', 'monomerA')
core.IO.set_default_namespace('monomerA')
core.print_out('\n')
p4util.banner('Monomer A HF')
core.print_out('\n')
monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)
if do_delta_mp2:
select_mp2(name, ref_wfn=monomerA_wfn, **kwargs)
mp2_corl_interaction_e -= core.get_variable('MP2 CORRELATION ENERGY')
# Compute Monomer B wavefunction
if (sapt_basis == 'dimer') and (ri == 'DF'):
core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
core.IO.set_default_namespace('monomerB')
core.print_out('\n')
p4util.banner('Monomer B HF')
core.print_out('\n')
monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
# Delta MP2
if do_delta_mp2:
select_mp2(name, ref_wfn=monomerB_wfn, **kwargs)
mp2_corl_interaction_e -= core.get_variable('MP2 CORRELATION ENERGY')
core.set_variable('SAPT MP2 CORRELATION ENERGY', mp2_corl_interaction_e)
core.set_global_option('DF_INTS_IO', df_ints_io)
if core.get_option('SCF', 'REFERENCE') == 'RHF':
core.IO.change_file_namespace(p4const.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
core.IO.change_file_namespace(p4const.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
core.IO.set_default_namespace('dimer')
core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)
if name in ['sapt0', 'ssapt0']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
elif name == 'sapt2':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
elif name in ['sapt2+', 'sapt2+dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+(3)', 'sapt2+(3)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+3', 'sapt2+3dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', False)
elif name in ['sapt2+(ccd)', 'sapt2+(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name in ['sapt2+(3)(ccd)', 'sapt2+(3)(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name in ['sapt2+3(ccd)', 'sapt2+3(ccd)dmp2']:
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
# Make sure we are not going to run CPHF on ROHF, since its MO Hessian
# is not SPD
if core.get_option('SCF', 'REFERENCE') == 'ROHF':
core.set_local_option('SAPT','COUPLED_INDUCTION',False)
core.print_out(' Coupled induction not available for ROHF.\n')
core.print_out(' Proceeding with uncoupled induction only.\n')
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT",
core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
if core.get_global_option("DF_BASIS_ELST") == "":
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
else:
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST",
core.get_global_option("DF_BASIS_ELST"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
core.print_out('\n')
p4util.banner(name.upper())
core.print_out('\n')
e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
from psi4.driver.qcdb.psivardefs import sapt_psivars
p4util.expand_psivars(sapt_psivars())
optstash.restore()
for term in ['ELST', 'EXCH', 'IND', 'DISP', 'TOTAL']:
core.set_variable(' '.join(['SAPT', term, 'ENERGY']),
core.get_variable(' '.join([name.upper(), term, 'ENERGY'])))
core.set_variable('CURRENT ENERGY', core.get_variable('SAPT TOTAL ENERGY'))
return dimer_wfn
def run_sapt_ct(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    a charge-transfer SAPT calculation of any level.
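    Illustrative usage (hypothetical input; names follow the *-ct forms
    handled below):
    >>> energy('sapt0-ct')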
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'])
if 'ref_wfn' in kwargs:
core.print_out('\nWarning! Argument ref_wfn is not valid for sapt computations\n')
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_local_option('SCF', 'SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_dimer.update_geometry() # make sure since mol from wfn, kwarg, or P::e
sapt_dimer.fix_orientation(True)
sapt_dimer.fix_com(True)
# Shifting to C1 so we need to copy the active molecule
if sapt_dimer.schoenflies_symbol() != 'c1':
core.print_out(' SAPT does not make use of molecular symmetry, further calculations in C1 point group.\n')
sapt_dimer = sapt_dimer.clone()
sapt_dimer.reset_point_group('c1')
sapt_dimer.fix_orientation(True)
sapt_dimer.fix_com(True)
sapt_dimer.update_geometry()
if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError('SAPT requires \"reference rhf\".')
nfrag = sapt_dimer.nfragments()
if nfrag != 2:
raise ValidationError('SAPT requires active molecule to have 2 fragments, not %s.' % (nfrag))
monomerA = sapt_dimer.extract_subsets(1, 2)
monomerA.set_name('monomerA')
monomerB = sapt_dimer.extract_subsets(2, 1)
monomerB.set_name('monomerB')
sapt_dimer.update_geometry()
monomerAm = sapt_dimer.extract_subsets(1)
monomerAm.set_name('monomerAm')
monomerBm = sapt_dimer.extract_subsets(2)
monomerBm.set_name('monomerBm')
ri = core.get_option('SCF', 'SCF_TYPE')
df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
# inquire if above at all applies to dfmp2
core.IO.set_default_namespace('dimer')
core.print_out('\n')
p4util.banner('Dimer HF')
core.print_out('\n')
core.set_global_option('DF_INTS_IO', 'SAVE')
dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
core.set_global_option('DF_INTS_IO', 'LOAD')
if (ri == 'DF'):
core.IO.change_file_namespace(97, 'dimer', 'monomerA')
core.IO.set_default_namespace('monomerA')
core.print_out('\n')
p4util.banner('Monomer A HF (Dimer Basis)')
core.print_out('\n')
monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)
if (ri == 'DF'):
core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
core.IO.set_default_namespace('monomerB')
core.print_out('\n')
p4util.banner('Monomer B HF (Dimer Basis)')
core.print_out('\n')
monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
core.set_global_option('DF_INTS_IO', df_ints_io)
core.IO.set_default_namespace('monomerAm')
core.print_out('\n')
p4util.banner('Monomer A HF (Monomer Basis)')
core.print_out('\n')
monomerAm_wfn = scf_helper('RHF', molecule=monomerAm, **kwargs)
core.IO.set_default_namespace('monomerBm')
core.print_out('\n')
p4util.banner('Monomer B HF (Monomer Basis)')
core.print_out('\n')
monomerBm_wfn = scf_helper('RHF', molecule=monomerBm, **kwargs)
core.IO.set_default_namespace('dimer')
core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)
if name == 'sapt0-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
elif name == 'sapt2-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
elif name == 'sapt2+-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
elif name == 'sapt2+(3)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
elif name == 'sapt2+3-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
elif name == 'sapt2+(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name == 'sapt2+(3)(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
elif name == 'sapt2+3(ccd)-ct':
core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
core.set_local_option('SAPT', 'DO_CCD_DISP', True)
core.print_out('\n')
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT",
core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
if core.get_global_option("DF_BASIS_ELST") == "":
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
else:
aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST",
core.get_global_option("DF_BASIS_ELST"),
"RIFIT", core.get_global_option("BASIS"))
dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
core.print_out('\n')
p4util.banner('SAPT Charge Transfer')
core.print_out('\n')
core.print_out('\n')
p4util.banner('Dimer Basis SAPT')
core.print_out('\n')
core.IO.change_file_namespace(p4const.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
core.IO.change_file_namespace(p4const.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
CTd = core.get_variable('SAPT CT ENERGY')
core.print_out('\n')
p4util.banner('Monomer Basis SAPT')
core.print_out('\n')
core.IO.change_file_namespace(p4const.PSIF_SAPT_MONOMERA, 'monomerAm', 'dimer')
core.IO.change_file_namespace(p4const.PSIF_SAPT_MONOMERB, 'monomerBm', 'dimer')
e_sapt = core.sapt(dimer_wfn, monomerAm_wfn, monomerBm_wfn)
CTm = core.get_variable('SAPT CT ENERGY')
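    # Charge transfer is taken as the difference between the dimer-basis
    # and monomer-basis induction terms computed above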
CT = CTd - CTm
units = (1000.0, p4const.psi_hartree2kcalmol, p4const.psi_hartree2kJmol)
core.print_out('\n\n')
core.print_out(' SAPT Charge Transfer Analysis\n')
core.print_out(' ------------------------------------------------------------------------------------------------\n')
core.print_out(' SAPT Induction (Dimer Basis) %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
tuple(CTd * u for u in units))
core.print_out(' SAPT Induction (Monomer Basis)%12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
tuple(CTm * u for u in units))
core.print_out(' SAPT Charge Transfer %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n\n' %
tuple(CT * u for u in units))
core.set_variable('SAPT CT ENERGY', CT)
optstash.restore()
return dimer_wfn
def run_fisapt(name, **kwargs):
"""Function encoding sequence of PSI module calls for
an F/ISAPT0 computation
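    Illustrative usage (hypothetical input):
    >>> energy('fisapt0')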
"""
optstash = p4util.OptionsState(
['SCF', 'SCF_TYPE'])
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_local_option('SCF', 'SCF_TYPE', 'DF')
# Get the molecule of interest
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
else:
core.print_out('Warning! FISAPT argument "ref_wfn" is only able to use molecule information.')
sapt_dimer = ref_wfn.molecule()
sapt_dimer.update_geometry() # make sure since mol from wfn, kwarg, or P::e
# Shifting to C1 so we need to copy the active molecule
if sapt_dimer.schoenflies_symbol() != 'c1':
core.print_out(' FISAPT does not make use of molecular symmetry, further calculations in C1 point group.\n')
sapt_dimer = sapt_dimer.clone()
sapt_dimer.reset_point_group('c1')
sapt_dimer.fix_orientation(True)
sapt_dimer.fix_com(True)
sapt_dimer.update_geometry()
if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError('FISAPT requires \"reference rhf\".')
if ref_wfn is None:
ref_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
sapt_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SAPT",
core.get_global_option("DF_BASIS_SAPT"),
"RIFIT", core.get_global_option("BASIS"),
ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SAPT", sapt_basis)
minao = core.BasisSet.build(ref_wfn.molecule(), "BASIS",
core.get_global_option("MINAO_BASIS"))
ref_wfn.set_basisset("MINAO", minao)
fisapt_wfn = core.fisapt(ref_wfn)
optstash.restore()
return fisapt_wfn
def run_mrcc(name, **kwargs):
"""Function that prepares environment and input files
for a calculation calling Kallay's MRCC code.
"""
# Check to see if we really need to run the SCF code.
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs)
vscf = core.get_variable('SCF TOTAL ENERGY')
# The parse_arbitrary_order method provides us the following information
# We require that level be provided. level is a dictionary
# of settings to be passed to core.mrcc
    if 'level' not in kwargs:
raise ValidationError('level parameter was not provided.')
level = kwargs['level']
# Fullname is the string we need to search for in iface
fullname = level['fullname']
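    # e.g. level might look like {'fullname': 'CCSDT', ...}; illustrative
    # only -- the complete key set is defined upstream by parse_arbitrary_order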
# User can provide 'keep' to the method.
# When provided, do not delete the MRCC scratch directory.
keep = False
if 'keep' in kwargs:
keep = kwargs['keep']
# Save current directory location
current_directory = os.getcwd()
# Find environment by merging PSIPATH and PATH environment variables
lenv = {
'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
':' + os.environ.get('PATH'),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
}
# Filter out None values as subprocess will fault on them
lenv = {k: v for k, v in lenv.items() if v is not None}
    # Need to move to the scratch directory, preferably into a separate directory in that location
psi_io = core.IOManager.shared_object()
os.chdir(psi_io.get_default_path())
# Make new directory specifically for mrcc
mrcc_tmpdir = 'mrcc_' + str(os.getpid())
if 'path' in kwargs:
mrcc_tmpdir = kwargs['path']
# Check to see if directory already exists, if not, create.
    if not os.path.exists(mrcc_tmpdir):
os.mkdir(mrcc_tmpdir)
# Move into the new directory
os.chdir(mrcc_tmpdir)
# Generate integrals and input file (dumps files to the current directory)
core.mrcc_generate_input(ref_wfn, level)
# Load the fort.56 file
# and dump a copy into the outfile
core.print_out('\n===== Begin fort.56 input for MRCC ======\n')
core.print_out(open('fort.56', 'r').read())
core.print_out('===== End fort.56 input for MRCC ======\n')
# Close psi4 output file and reopen with filehandle
core.close_outfile()
pathfill = '' if os.path.isabs(core.outfile_name()) else current_directory + os.path.sep
p4out = open(pathfill + core.outfile_name(), 'a')
# Modify the environment:
    # PGI Fortran prints warning to screen if STOP is used
os.environ['NO_STOP_MESSAGE'] = '1'
# Obtain user's OMP_NUM_THREADS so that we don't blow it away.
omp_num_threads_found = 'OMP_NUM_THREADS' in os.environ
    if omp_num_threads_found:
        omp_num_threads_user = os.environ['OMP_NUM_THREADS']
    # If the user provided MRCC_OMP_NUM_THREADS, set the environ to it
    if core.has_option_changed('MRCC', 'MRCC_OMP_NUM_THREADS'):
os.environ['OMP_NUM_THREADS'] = str(core.get_option('MRCC', 'MRCC_OMP_NUM_THREADS'))
# Call dmrcc, directing all screen output to the output file
external_exe = 'dmrcc'
try:
retcode = subprocess.Popen([external_exe], bufsize=0, stdout=subprocess.PIPE, env=lenv)
except OSError as e:
        sys.stderr.write('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
p4out.write('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
message = ("Program %s not found in path or execution failed: %s\n" % (external_exe, e.strerror))
raise ValidationError(message)
c4out = ''
while True:
data = retcode.stdout.readline()
if not data:
break
if core.outfile_name() == 'stdout':
sys.stdout.write(data)
else:
p4out.write(data)
p4out.flush()
c4out += data
# try:
# if core.outfile_name() == 'stdout':
# retcode = subprocess.call('dmrcc', shell=True, env=lenv)
# else:
# retcode = subprocess.call('dmrcc >> ' + current_directory + '/' + core.outfile_name(), shell=True, env=lenv)
#
# if retcode < 0:
# print('MRCC was terminated by signal %d' % -retcode, file=sys.stderr)
# exit(1)
# elif retcode > 0:
# print('MRCC errored %d' % retcode, file=sys.stderr)
# exit(1)
#
# except OSError as e:
# print('Execution failed: %s' % e, file=sys.stderr)
# exit(1)
# Restore the OMP_NUM_THREADS that the user set.
    if omp_num_threads_found:
        if core.has_option_changed('MRCC', 'MRCC_OMP_NUM_THREADS'):
os.environ['OMP_NUM_THREADS'] = omp_num_threads_user
# Scan iface file and grab the file energy.
ene = 0.0
for line in open('iface'):
fields = line.split()
m = fields[1]
try:
ene = float(fields[5])
if m == "MP(2)":
m = "MP2"
core.set_variable(m + ' TOTAL ENERGY', ene)
core.set_variable(m + ' CORRELATION ENERGY', ene - vscf)
except ValueError:
continue
# The last 'ene' in iface is the one the user requested.
core.set_variable('CURRENT ENERGY', ene)
core.set_variable('CURRENT CORRELATION ENERGY', ene - vscf)
# Load the iface file
iface = open('iface', 'r')
iface_contents = iface.read()
# Delete mrcc tempdir
os.chdir('..')
try:
# Delete unless we're told not to
        if not keep and 'path' not in kwargs:
shutil.rmtree(mrcc_tmpdir)
except OSError as e:
print('Unable to remove MRCC temporary directory %s' % e, file=sys.stderr)
exit(1)
# Return to submission directory and reopen output file
os.chdir(current_directory)
p4out.close()
core.reopen_outfile()
# If we're told to keep the files or the user provided a path, do nothing.
    if keep or 'path' in kwargs:
core.print_out('\nMRCC scratch files have been kept.\n')
core.print_out('They can be found in ' + mrcc_tmpdir)
# Dump iface contents to output
core.print_out('\n')
p4util.banner('Full results from MRCC')
core.print_out('\n')
core.print_out(iface_contents)
return ref_wfn
def run_fnodfcc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a DF-CCSD(T) computation.
>>> set cc_type df
>>> energy('fno-ccsd(t)')
"""
kwargs = p4util.kwargs_lower(kwargs)
# stash user options
optstash = p4util.OptionsState(
['FNOCC', 'COMPUTE_TRIPLES'],
['FNOCC', 'DFCC'],
['FNOCC', 'NAT_ORBS'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'DF_BASIS_CC'],
['SCF', 'DF_BASIS_SCF'],
['SCF', 'DF_INTS_IO'],
['SCF', 'SCF_TYPE'])
core.set_local_option('FNOCC', 'DFCC', True)
core.set_local_option('FNOCC', 'RUN_CEPA', False)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError("""Error: %s requires 'reference rhf'.""" % name)
def set_cholesky_from(mtd_type):
type_val = core.get_global_option(mtd_type)
if type_val == 'CD':
core.set_local_option('FNOCC', 'DF_BASIS_CC', 'CHOLESKY')
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'CD')
core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
elif type_val == 'DF':
if core.get_option('FNOCC', 'DF_BASIS_CC') == 'CHOLESKY':
core.set_local_option('FNOCC', 'DF_BASIS_CC', '')
# Alter default algorithm
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")
else:
raise ValidationError("""Invalid type '%s' for DFCC""" % type_val)
# triples?
if name == 'ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
set_cholesky_from('CC_TYPE')
elif name == 'ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
set_cholesky_from('CC_TYPE')
elif name == 'fno-ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
set_cholesky_from('CC_TYPE')
elif name == 'fno-ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
set_cholesky_from('CC_TYPE')
if core.get_option('SCF', 'SCF_TYPE') not in ['CD', 'DF']:
raise ValidationError("""Invalid scf_type for DFCC.""")
# save DF or CD ints generated by SCF for use in CC
core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
else:
if ref_wfn.molecule().schoenflies_symbol() != 'c1':
raise ValidationError(""" FNOCC does not make use of molecular symmetry: """
"""reference wavefunction must be C1.\n""")
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
core.get_global_option("DF_BASIS_CC"),
"RIFIT", core.get_global_option("BASIS"))
ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)
fnocc_wfn = core.fnocc(ref_wfn)
optstash.restore()
return fnocc_wfn
def run_fnocc(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a QCISD(T), CCSD(T), MP2.5, MP3, and MP4 computation.
>>> energy('fno-ccsd(t)')
"""
kwargs = p4util.kwargs_lower(kwargs)
level = kwargs.get('level', 0)
# stash user options:
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['FNOCC', 'RUN_MP2'],
['FNOCC', 'RUN_MP3'],
['FNOCC', 'RUN_MP4'],
['FNOCC', 'RUN_CCSD'],
['FNOCC', 'COMPUTE_TRIPLES'],
['FNOCC', 'COMPUTE_MP4_TRIPLES'],
['FNOCC', 'DFCC'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'USE_DF_INTS'],
['FNOCC', 'NAT_ORBS'])
core.set_local_option('FNOCC', 'DFCC', False)
core.set_local_option('FNOCC', 'RUN_CEPA', False)
core.set_local_option('FNOCC', 'USE_DF_INTS', False)
# which method?
if name == 'ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
elif name == 'ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
elif name == 'fno-ccsd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-ccsd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'qcisd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'qcisd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'fno-qcisd':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-qcisd(t)':
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
core.set_local_option('FNOCC', 'RUN_CCSD', False)
elif name == 'mp2':
core.set_local_option('FNOCC', 'RUN_MP2', True)
elif name == 'fno-mp3':
core.set_local_option('FNOCC', 'RUN_MP3', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'fno-mp4':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', True)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'mp4(sdq)':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', False)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
elif name == 'fno-mp4(sdq)':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', False)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', False)
core.set_local_option('FNOCC', 'NAT_ORBS', True)
elif name == 'mp3':
core.set_local_option('FNOCC', 'RUN_MP3', True)
elif name == 'mp4':
core.set_local_option('FNOCC', 'RUN_MP4', True)
core.set_local_option('FNOCC', 'COMPUTE_MP4_TRIPLES', True)
core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', True)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError("""Error: %s requires 'reference rhf'.""" % name)
# Bypass the scf call if a reference wavefunction is given
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if core.get_option('FNOCC', 'USE_DF_INTS') == False:
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
else:
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
fnocc_wfn = core.fnocc(ref_wfn)
# set current correlation energy and total energy. only need to treat mpn here.
if name == 'mp3':
emp3 = core.get_variable("MP3 TOTAL ENERGY")
cemp3 = core.get_variable("MP3 CORRELATION ENERGY")
core.set_variable("CURRENT ENERGY", emp3)
core.set_variable("CURRENT CORRELATION ENERGY", cemp3)
elif name == 'fno-mp3':
emp3 = core.get_variable("MP3 TOTAL ENERGY")
cemp3 = core.get_variable("MP3 CORRELATION ENERGY")
core.set_variable("CURRENT ENERGY", emp3)
core.set_variable("CURRENT CORRELATION ENERGY", cemp3)
elif name == 'mp4(sdq)':
emp4sdq = core.get_variable("MP4(SDQ) TOTAL ENERGY")
cemp4sdq = core.get_variable("MP4(SDQ) CORRELATION ENERGY")
core.set_variable("CURRENT ENERGY", emp4sdq)
core.set_variable("CURRENT CORRELATION ENERGY", cemp4sdq)
elif name == 'fno-mp4(sdq)':
emp4sdq = core.get_variable("MP4(SDQ) TOTAL ENERGY")
cemp4sdq = core.get_variable("MP4(SDQ) CORRELATION ENERGY")
core.set_variable("CURRENT ENERGY", emp4sdq)
core.set_variable("CURRENT CORRELATION ENERGY", cemp4sdq)
elif name == 'fno-mp4':
emp4 = core.get_variable("MP4 TOTAL ENERGY")
cemp4 = core.get_variable("MP4 CORRELATION ENERGY")
core.set_variable("CURRENT ENERGY", emp4)
core.set_variable("CURRENT CORRELATION ENERGY", cemp4)
elif name == 'mp4':
emp4 = core.get_variable("MP4 TOTAL ENERGY")
cemp4 = core.get_variable("MP4 CORRELATION ENERGY")
core.set_variable("CURRENT ENERGY", emp4)
core.set_variable("CURRENT CORRELATION ENERGY", cemp4)
optstash.restore()
return fnocc_wfn
def run_cepa(name, **kwargs):
"""Function encoding sequence of PSI module calls for
a cepa-like calculation.
>>> energy('cepa(1)')
"""
kwargs = p4util.kwargs_lower(kwargs)
# save user options
optstash = p4util.OptionsState(
['TRANSQT2', 'WFN'],
['FNOCC', 'NAT_ORBS'],
['FNOCC', 'RUN_CEPA'],
['FNOCC', 'USE_DF_INTS'],
['FNOCC', 'CEPA_NO_SINGLES'])
core.set_local_option('FNOCC', 'RUN_CEPA', True)
core.set_local_option('FNOCC', 'USE_DF_INTS', False)
# what type of cepa?
if name in ['lccd', 'fno-lccd']:
cepa_level = 'cepa(0)'
core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', True)
elif name in ['cepa(0)', 'fno-cepa(0)', 'lccsd', 'fno-lccsd']:
cepa_level = 'cepa(0)'
core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', False)
elif name in ['cepa(1)', 'fno-cepa(1)']:
cepa_level = 'cepa(1)'
elif name in ['cepa(3)', 'fno-cepa(3)']:
cepa_level = 'cepa(3)'
elif name in ['acpf', 'fno-acpf']:
cepa_level = 'acpf'
elif name in ['aqcc', 'fno-aqcc']:
cepa_level = 'aqcc'
elif name in ['cisd', 'fno-cisd']:
cepa_level = 'cisd'
else:
raise ValidationError("""Error: %s not implemented\n""" % name)
core.set_local_option('FNOCC', 'CEPA_LEVEL', cepa_level.upper())
if name in ['fno-lccd', 'fno-lccsd', 'fno-cepa(0)', 'fno-cepa(1)', 'fno-cepa(3)',
'fno-acpf', 'fno-aqcc', 'fno-cisd']:
core.set_local_option('FNOCC', 'NAT_ORBS', True)
# throw an exception for open-shells
if core.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError("""Error: %s requires 'reference rhf'.""" % name)
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_wfn = scf_helper(name, **kwargs) # C1 certified
if core.get_option('FNOCC', 'USE_DF_INTS') == False:
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
else:
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
fnocc_wfn = core.fnocc(ref_wfn)
# one-electron properties
if core.get_option('FNOCC', 'DIPMOM'):
if cepa_level in ['cepa(1)', 'cepa(3)']:
core.print_out("""\n Error: one-electron properties not implemented for %s\n\n""" % name)
elif core.get_option('FNOCC', 'NAT_ORBS'):
core.print_out("""\n Error: one-electron properties not implemented for %s\n\n""" % name)
else:
p4util.oeprop(fnocc_wfn, 'DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'NO_OCCUPATIONS', title=cepa_level.upper())
optstash.restore()
return fnocc_wfn
def run_detcas(name, **kwargs):
"""Function encoding sequence of PSI module calls for
    determinant-based multireference wavefunctions,
namely CASSCF and RASSCF.
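    Illustrative usage (hypothetical input):
    >>> energy('casscf')
    >>> energy('rasscf')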
"""
optstash = p4util.OptionsState(
['DETCI', 'WFN'],
['SCF', 'SCF_TYPE']
)
user_ref = core.get_option('DETCI', 'REFERENCE')
if user_ref not in ['RHF', 'ROHF']:
raise ValidationError('Reference %s for DETCI is not available.' % user_ref)
if name == 'rasscf':
core.set_local_option('DETCI', 'WFN', 'RASSCF')
elif name == 'casscf':
core.set_local_option('DETCI', 'WFN', 'CASSCF')
else:
raise ValidationError("Run DETCAS: Name %s not understood" % name)
ref_wfn = kwargs.get('ref_wfn', None)
if ref_wfn is None:
ref_optstash = p4util.OptionsState(
['SCF_TYPE'],
['DF_BASIS_SCF'],
['DF_BASIS_MP2'],
['ONEPDM'],
['OPDM_RELAX']
)
# No real reason to do a conventional guess
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
# If RHF get MP2 NO's
        # Why doesn't this work for conv?
if ((core.get_option('SCF', 'SCF_TYPE') == 'DF') and (user_ref == 'RHF') and
(core.get_option('DETCI', 'MCSCF_TYPE') in ['DF', 'AO']) and
(core.get_option("DETCI", "MCSCF_GUESS") == "MP2")):
core.set_global_option('ONEPDM', True)
core.set_global_option('OPDM_RELAX', False)
ref_wfn = run_dfmp2_gradient(name, **kwargs)
else:
ref_wfn = scf_helper(name, **kwargs)
# Ensure IWL files have been written
if (core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV'):
mints = core.MintsHelper(ref_wfn.basisset())
mints.set_print(1)
mints.integrals()
ref_optstash.restore()
# The DF case
if core.get_option('DETCI', 'MCSCF_TYPE') == 'DF':
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DF')
scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", core.get_global_option('BASIS'),
puream=ref_wfn.basisset().has_puream())
ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
# The AO case
elif core.get_option('DETCI', 'MCSCF_TYPE') == 'AO':
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'DIRECT')
# The conventional case
elif core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV':
if not core.has_option_changed('SCF', 'SCF_TYPE'):
core.set_global_option('SCF_TYPE', 'PK')
# Ensure IWL files have been written
proc_util.check_iwl_file_from_scf_type(core.get_option('SCF', 'SCF_TYPE'), ref_wfn)
else:
raise ValidationError("Run DETCAS: MCSCF_TYPE %s not understood." % str(core.get_option('DETCI', 'MCSCF_TYPE')))
# Second-order SCF requires non-symmetric density matrix support
if core.get_option('DETCI', 'MCSCF_ALGORITHM') in ['AH', 'OS']:
proc_util.check_non_symmetric_jk_density("Second-order MCSCF")
ciwfn = mcscf.mcscf_solver(ref_wfn)
    # We would always like to print a little dipole information
oeprop = core.OEProp(ciwfn)
oeprop.set_title(name.upper())
oeprop.add("DIPOLE")
oeprop.compute()
ciwfn.set_oeprop(oeprop)
core.set_variable("CURRENT DIPOLE X", core.get_variable(name.upper() + " DIPOLE X"))
core.set_variable("CURRENT DIPOLE Y", core.get_variable(name.upper() + " DIPOLE Y"))
core.set_variable("CURRENT DIPOLE Z", core.get_variable(name.upper() + " DIPOLE Z"))
optstash.restore()
return ciwfn
def run_efp(name, **kwargs):
"""Function encoding sequence of module calls for a pure EFP
computation (ignore any QM atoms).
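    Illustrative usage (hypothetical input; the molecule must contain EFP
    fragments):
    >>> energy('efp')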
"""
# initialize library
efp = core.get_active_efp()
if efp.nfragments() == 0:
raise ValidationError("""Method 'efp' not available without EFP fragments in molecule""")
# set options
core.set_global_option('QMEFP', False) # apt to go haywire if set locally to efp
core.efp_set_options()
efp.print_out()
returnvalue = efp.compute()
return returnvalue
|
kannon92/psi4
|
psi4/driver/procedures/proc.py
|
Python
|
gpl-2.0
| 149,058
|
[
"Psi4"
] |
ab7123b07ef5de3de9eb59d1a61f3555d65836b47a38c4300e868a994187fc8d
|
"""-*-python-*-
suggested use:
place a dotted symlink to this
file in your home directory
ln -s /.../pythonrc ~/.pythonrc
and set the environment variable
PYTHONSTARTUP
to the symlink
export PYTHONSTARTUP="${HOME}/.pythonrc"
see also:
https://docs.python.org/3/using/cmdline.html#envvar-PYTHONSTARTUP
### todo
* conn gg Photos.
* droplet / bucket / gcloud
|_--< low-Trust of thumb drives AND spinning
..---++intro to req., here <<<EO_ThHyerOrdTHOT
prioritizing (FOT)
connect generic stor (DO droplet)
**after** _or_ **before**
domain-specific stor (gg Photos)
EO_ThHyerOrdTHOT
* plivo (or twilio fallback) text msgs
--
* lint, gofmt-alike, trailing whitespace
- pylintrc
- pylint ASTeroid checker(s), if only for example
- pre-commit
* auto-generate documentation
  - sphinx -- separate index for pyutils
- generate todos from this markdown format
* maintain and/or generate breakpoints list
|_--> keep a bunch && toggle, don't waste
time [staring at the computer, typ-
-ing `breakpoint()` over && over.
- also, cram a bunch of data into `tabulate`
and inject a special variable into pdb.
* tab-complete python names (w/ rope)
in at least one editor
--
* image view & editing
- scaled .
--
* music player
- "lock" mode: only responsive on specific keypress
  ( or sequence thereof) - displaying sequence
  on other key. this is not a security feature.
- read metadata
- access & cache album art, credits, etc.
from wikipedia, allmusic, bandcamp, etc.
- qtile integration (volume, disp toggle)
--- +eFmt(s)
- annot playback (eg Tuple[timestamp...])
(ie "verse|chorus|bridge", "measures
_ to _") within an indiv piece
- Dict[float, _]: playback speed to (eg) cpu usage
- mix *at most* two (Th) << multiproc
(eg "i can play [dylan|hendrix]'s _
over _. same song" 19',20' phenomenon)
(ie global annotations)
- equalizer & effects
* generative music / ambient noise
- binaural beats-type
- bytebeat
- etc.
*
......onecet an py-chemist has
.... <<will i want more>>
/\//____/\/\//\\/\///\/\/\\/\//\/
../ | /\ |\ |> get other phone
\/|\ | /--\ | \ |droid (from storage)
/\| \| / \| \|> install android build
西 \ | on a laptop
\_|__ (fuschia, 茹果渴能)
and leave this
to bury
"""
import typing
from typing import (
Dict,
List,
Hashable,
Tuple,
Union,
Sequence,
)
from collections import (
namedtuple,
)
import datetime as dt
from datetime import (
datetime as dto,
timedelta,
)
from functools import (
reduce,
partial,
wraps,
)
from inspect import (
getmembers as gm,
getsource,
getsourcefile as gsf,
getmodule,
ismodule,
isclass,
)
import operator as op
from operator import (
add,
itemgetter as ig,
attrgetter,
)
import os
import sys
import os.path as pth
import random
from pprint import (
    # pp, 3.8+
pprint,
)
import math
from copy import (
deepcopy,
)
import pydoc
from pydoc import (
pager,
)
import curses
import curses.textpad
import subprocess
from subprocess import (
Popen,
PIPE,
)
import shlex
import configparser
import pathlib
from io import (
BytesIO,
StringIO,
)
from warnings import warn
import zipfile
import gzip
from gzip import (
GzipFile,
)
from tempfile import (
gettempdir,
mkdtemp,
)
import json
from importlib import (
reload,
)
import shutil
from shutil import (
copyfileobj,
)
import threading
import asyncio
import queue
import signal
import time
import ast
import socket
import ipaddress
import rlcompleter
import readline
import venv
### END stdlib imports
readline.parse_and_bind("tab: complete")
###
def ppp(obj):
sio = StringIO()
pprint(obj, stream=sio)
pager(sio.getvalue())
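# e.g. ppp(dict(os.environ))  # illustrative: page a pretty-printed object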
def gmn(*args, **kwargs):
return [m[0] for m in gm(*args, **kwargs)]
def gs(*args, **kwargs):
pydoc.pager(getsource(*args, **kwargs))
def getsourcefiles(*args, **kwargs):
getsource_results = [getsource(arg, **kwargs)
for arg in args]
sources_string = (
'#<<<\n'+'\n#<<<\n'.join(getsource_results)
)
pydoc.pager(
sources_string
)
inc = lambda x: x + 1
dec = lambda x: x - 1
class Leaf:
pass
class Branch:
pass
class Tree:
pass
class NameSpace:
def __init__(self, obj):
self._obj = obj
def names(self):
if ismodule(self._obj):
return dir(self._obj)
elif isclass(self._obj):
return dir(self._obj)
assert False, repr(self._obj) + " is not a module or class"
def attrs(self):
return map(partial(getattr, self._obj), self.names())
def namespaces(self):
        return [NameSpace(attr) for attr in self.attrs() if any(juxt(ismodule, isclass)(attr))]
def pysearch_name(name, maxdepth=3):
res = []
permissive_getattr = excepts(
(ModuleNotFoundError, AttributeError), partial(getattr), lambda _: None
)
def name_match(mname):
return name in mname
res += [sys.modules[mname] for mname in sys.modules.keys() if name_match(mname)]
def search_class(cls):
for mname in dir(cls):
if name_match(mname):
res.append(permissive_getattr(cls, mname))
def search_module(module, depth):
if depth > maxdepth:
return
if name in dir(module):
res.append(permissive_getattr(module, name))
for (mname, member) in [
(mname, permissive_getattr(module, mname)) for mname in dir(module)
]:
if not member:
continue
if name_match(mname):
res.append(member)
if isinstance(member, type):
search_class(member)
if ismodule(member):
search_module(member, depth + 1)
for mname in list(sys.modules.keys()):
search_module(sys.modules[mname], 0)
return res
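# e.g. pysearch_name('Path')  # illustrative: gathers loaded names containing
# 'Path' (pathlib.Path, ...), descending up to `maxdepth` module levels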
ls = os.listdir
def cat(filepath, retstr=False):
with open(filepath) as f:
fstr = f.read()
if retstr:
return fstr
pydoc.pager(fstr)
run = partial(
subprocess.run,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# shell=True,
# check=True,
)
config = configparser.ConfigParser()
config.read(
[
os.path.join(os.path.dirname(os.path.realpath(__file__)), "python.conf"),
]
)
class PipInstallException(Exception):
pass
# todo: --cache-dir option,
# possibly following pre-commit cache strategy
# todo(???): build wheels
def pip_install(package_name):
name_to_specifier = {
name: config["package-specifiers"][name]
for name in config["package-specifiers"]
}
if package_name not in name_to_specifier:
if os.getenv("PY_DBG_IMPORTS"):
breakpoint()
raise PipInstallException("unknown package", (package_name,))
specifier = name_to_specifier[package_name]
cmd = ["pip", "install",] + (['-e',] if specifier.startswith('git+https://') else []) + [specifier,]
res = subprocess.run(cmd)
if res.returncode == 0:
return
raise PipInstallException("install failed", (res,))
###
_VENV_DIR = pth.join(str(pathlib.Path().home()), ".pyvenv")
_DEFAULT_VENV = pth.join(_VENV_DIR, "default")
if sys.prefix == sys.base_prefix:
if not os.getenv("PY_CREATE_VENV"):
raise Exception("not in venv. set PY_CREATE_VENV to create")
venv.create(_DEFAULT_VENV)
print(". " + pth.join(_DEFAULT_VENV, "bin", "activate"))
exit()
class ImportBlocker(object):
def __init__(self):
self.module_names = set()
self.package_names = set()
def find_module(self, fullname, path=None):
if fullname.split(".")[0] in self.package_names:
return self
if fullname in self.module_names:
return self
return None
def exec_module(self, mdl):
# return an empty namespace
return {}
def create_module(self, spec):
return None
import_blocker = ImportBlocker()
sys.meta_path.append(import_blocker)
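# e.g. import_blocker.package_names.add('somepkg')  # illustrative: later
# imports of 'somepkg' are then intercepted by this blocker (as in the
# retry loop below) instead of raising ModuleNotFoundError again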
AUTO_DBG=False
def my_except_hook(exctype, value, traceback):
if exctype is KeyboardInterrupt:
print("see you later!")
sys.__excepthook__(exctype, value, traceback)
if AUTO_DBG:
# breakpoint()
import pdb
pdb.pm() # post-mortem
def install_package(name):
pass
sys.excepthook = my_except_hook
while True:
try:
import toolz
import toolz.functoolz as ftlz
import toolz.itertoolz as itlz
import toolz.dicttoolz as dtlz
from toolz.functoolz import (
compose_left,
excepts,
compose,
curry,
flip,
juxt,
thread_last,
)
from toolz.itertoolz import (
accumulate,
concat,
concatv,
cons,
diff,
first,
isdistinct,
groupby,
mapcat,
nth,
unique,
)
from toolz.dicttoolz import (
keymap,
valmap,
itemmap,
)
from toolz.curried import (
get,
keyfilter,
valfilter,
itemfilter,
)
import numpy as np
import npyscreen
# without Qt install (see python.conf)
    # gui `pymol.launch([])` is inoperative
# and this package is at most useful
    # to parse file formats (protein database -- .pdb), etc.
import pymol
import urwid
import tkinter
import astor
import rope
import rope.base.project
from rope.base import libutils as rope_libutils
from rope.base.project import (
Project as RopeProject,
)
from rope.base.resources import (
File as RopeFile,
Folder as RopeFolder,
Resource as RopeResource,
)
from rope.base.pyobjectsdef import (
PyModule as RopePyModule,
)
from rope.base.codeanalyze import (
SourceLinesAdapter as RopeSourceLinesAdapter,
)
from rope.base.change import (
ChangeSet as RopeChangeSet
)
import rope.refactor.move
from rope.refactor.move import (
MoveGlobal as RopeMoveGlobal,
MoveModule as RopeMoveModule,
)
import rope.refactor.multiproject
import rope.contrib.generate
import astor
from libqtile import qtile
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import pyutils
from pyutils.file_browse import (simple_file_browser_urwid,)
from pyutils.calendar import (ics_cal_busy_times_this_week,)
import pyutils.pyjuke as juke
from pyutils.pyjuke import webapp as juke_web
from pyutils import pastebin
from pyutils.pastebin import pastebin_app
from pyutils.cartography import osm
from pyutils.py_alarm_call.dashbd import (
dashbd,
)
from pyutils import cookbook as cb
from pyutils import scrape
from pyutils.text_edit import urwid as te
sys.path.pop()
except ModuleNotFoundError as err:
package_name = err.name
try:
print("attempting to install " + package_name)
pip_install(package_name)
except PipInstallException as ex:
if os.getenv("PY_DBG_IMPORTS"):
breakpoint()
import_blocker.package_names.add(package_name)
continue
break
# reset to orig
# sys.excepthook = sys.__excepthook__
uninstalled_packages = import_blocker.package_names.copy()
if uninstalled_packages:
print("uninstalled packages")
print(uninstalled_packages)
###
sfbrow_ur = simple_file_browser_urwid
def python_viewer_urwid(src):
""" stepping-stone towoard src editor:
use AST in parallel with text.
- syntax highlighting
- folding
- goto definition
    - find occurrences
- opt line no.s
"""
pass
import importlib.util
def get_pyutils_todos():
wr = os.walk(os.path.dirname(pyutils.__file__))
wrfs = []
for wri in wr:
for wrf in wri[2]:
wrfs += [os.path.join(wri[0], wrf)]
pfs = [wrf for wrf in wrfs
if os.path.splitext(wrf)[1] == '.py']
def pf_2_docstring(pf):
pfnne = os.path.splitext(os.path.basename(pf))[0]
        if pfnne == '__init__':
            mod_name = os.path.split(pf)[-2]
        elif pfnne == '__main__':
            return ""
        else:
            mod_name = pfnne
spec = importlib.util.spec_from_file_location(
mod_name,
pf)
foo = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(foo)
        except Exception as exc:
print(exc)
breakpoint()
return foo.__doc__ or ''
def docstring_2_todos(module_docstring):
dslines = module_docstring.split('\n')
todo_lines = []
in_todo = False
for line in dslines:
if line.startswith('### todo'):
in_todo = True
continue
if not in_todo:
continue
if line == '':
break
todo_lines += [line]
if not todo_lines:
return []
bullets = ['*', '-']
if not todo_lines[0][0] in bullets:
lin = todo_lines[0]
l0 = lin[0]
breakpoint()
raise Exception()
return ' '.join(todo_lines)
todos = []
curr_todo_lines = [todo_lines[0]]
for line in todo_lines[1:]:
if line[0] in bullets:
todos += [' '.join(curr_todo_lines)]
curr_todo_lines = [line]
else:
curr_todo_lines += [line]
return todos
assts = []
for pf in pfs:
with open(pf, 'r') as f:
assts += [ast.parse(f.read())]
# spec = importlib.util.spec_from_file_location("module.name", "/path/to/file.py")
# foo = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(foo)
# foo.MyClass()
breakpoint()
bns = list(map(os.path.basename, pfs))
dss = []
for pf in pfs:
try:
ds = pf_2_docstring(pf)
except Exception as exc:
continue
dss += [ds]
tdaa = list(map(docstring_2_todos, dss))
todos = dict(zip(bns, tdaa))
ppp(todos)
return todos
def rope_get_pyutils_todos():
pyutils_rootdir = pth.dirname(pyutils.__file__)
pythonrc_rootdir = pth.dirname(pyutils_rootdir)
pyutils_project = RopeProject(pyutils_rootdir)
pythonrc_project = RopeProject(pythonrc_rootdir)
pj = pyutils_project
breakpoint()
# x = pyutils_project
# y = pythonrc_project
# pyutils_python_files = pyutils_project.get_python_files()
# breakpoint()
# pyutils_modules = list(map(pyutils_project.find_module,))
pyutils_modules = thread_last(
pyutils_project.get_python_files(),
(map, lambda file: os.path.splitext(file.name)),
(map, get(0)),
(map, lambda name: (pj.find_module(name)
and pj.get_module(name))),
(filter, lambda x: x is not None),
list,
)
x = pyutils_modules
breakpoint()
def module_to_todos(pyutils_module):
module_ast = pyutils_module.get_ast()
# filter(lambda node: isinstance module_ast.body
        module_docstring = ast.get_docstring(module_ast)
dslines = module_docstring.split('\n')
todo_lines = []
in_todo = False
for line in dslines:
if line == '### todo':
                in_todo = True
continue
if not in_todo:
continue
if line == '':
break
todo_lines += [line]
assert todo_lines[0][0] == '*'
todos = []
curr_todo_lines = [todo_lines[0]]
for line in todo_lines[1:]:
if line[0] == '*':
                todos += [' '.join(curr_todo_lines)]
curr_todo_lines = [line]
else:
curr_todo_lines += [line]
return todos
    return {module.name: module_to_todos(module)
            for module in pyutils_modules}
def rope_move_fn_from_pythonrc(fn_names, pyutils_pkg_name):
if '.' in pyutils_pkg_name:
raise ValueError()
pyutils_rootdir = pth.dirname(pyutils.__file__)
pythonrc_rootdir = pth.dirname(pyutils_rootdir)
# pyutils_project = RopeProject(pyutils_rootdir)
pythonrc_project = RopeProject(pythonrc_rootdir)
#
pyutils_pkg_specifier = 'pyutils.'+pyutils_pkg_name
def _close():
pythonrc_project.close()
def _ensure_pyutils_pkg(rope_project) -> RopeFolder:
found_pkg: RopeFolder = rope_project.find_module(pyutils_pkg_specifier)
if found_pkg is None:
new_pkg: RopeFolder = rope.contrib.generate.create_package(
rope_project, pyutils_pkg_specifier)
pyutils_pkg = new_pkg
else:
pyutils_pkg = found_pkg
assert isinstance(pyutils_pkg, (RopeResource,))
assert pyutils_pkg.name == pyutils_pkg_name
assert pyutils_pkg.is_folder()
name_to_pkg_file = {f.name: f for f in pyutils_pkg.get_files()}
assert '__init__.py' in name_to_pkg_file
init_py: RopeFile = name_to_pkg_file['__init__.py']
return pyutils_pkg
def _str_to_ast(fstr, fname):
""" astor.code_to_ast.parse_file extract """
fstr = fstr.replace('\r\n', '\n').replace('\r', '\n')
if not fstr.endswith('\n'):
fstr += '\n'
return ast.parse(fstr, filename=fname)
def _rope_file_to_ast(rope_file: RopeFile) -> ast.Module:
return _str_to_ast(rope_file.read(), rope_file.name)
    def _rope_module_fn_bounds(rope_module: RopePyModule, func_name: str) -> Dict[str, int]:
module_ast = rope_module.get_ast()
name_to_function_def: Dict[str, ast.FunctionDef] = {
node.name: node for node in module_ast.body
if isinstance(node, (ast.FunctionDef,))
}
if func_name not in name_to_function_def:
raise ValueError()
function_def = name_to_function_def[func_name]
bounds = {attr_name: getattr(function_def, attr_name)
for attr_name in
['lineno', 'end_lineno',
'col_offset', 'end_col_offset']}
module_lines_adapter: RopeSourceLinesAdapter = rope_module.lines
file_lines = [module_lines_adapter.get_line(lineno)
for lineno in range(module_lines_adapter.length())]
char = module_lines_adapter.get_line_start(
function_def.lineno
) + function_def.col_offset
end_char = module_lines_adapter.get_line_start(
function_def.end_lineno
) + function_def.end_col_offset
bounds.update(char=char, end_char=end_char)
return bounds
dest_pkg = _ensure_pyutils_pkg(pythonrc_project)
pythonrc_module: RopePyModule = pythonrc_project.get_module('pythonrc')
pythonrc_resource: RopeFile = pythonrc_module.get_resource()
def _make_move_obj_for_one_fn(fn_name):
fn_bounds = _rope_module_fn_bounds(pythonrc_module, fn_name)
if fn_bounds['col_offset'] != 0:
Exception("expected a top-level function", (fn_name, fn_bounds,))
move_offset = fn_bounds["char"] + len('def ')
if not isinstance(
rope.refactor.move.create_move(
pythonrc_project,
pythonrc_module.get_resource(),
move_offset,
), (
RopeMoveGlobal,
)):
raise RuntimeError()
from rope.refactor.move import _ChangeMoveOccurrencesHandle
from rope.refactor import occurrences
from rope.refactor.move import ModuleSkipRenamer
from rope.base import libutils
from rope.refactor import importutils
from rope.base.change import ChangeContents
class MoveGlobalKeepSrcImports(RopeMoveGlobal):
def _source_module_changes(self, dest):
placeholder = '__rope_moving_%s_' % self.old_name
handle = _ChangeMoveOccurrencesHandle(placeholder)
occurrence_finder = occurrences.create_finder(
self.project, self.old_name, self.old_pyname)
start, end = self._get_moving_region()
renamer = ModuleSkipRenamer(occurrence_finder, self.source,
handle, start, end)
source = renamer.get_changed_module()
pymodule = libutils.get_string_module(self.project, source, self.source)
#~ source = self.import_tools.organize_imports(pymodule, sort=False)
if handle.occurred:
pymodule = libutils.get_string_module(
self.project, source, self.source)
# Adding new import
source, imported = importutils.add_import(
self.project, pymodule, self._new_modname(dest), self.old_name)
source = source.replace(placeholder, imported)
return ChangeContents(self.source, source)
return MoveGlobalKeepSrcImports(
pythonrc_project,
pythonrc_module.get_resource(),
move_offset,
)
my_changes = None
for fn_name in fn_names:
my_move_obj = _make_move_obj_for_one_fn(fn_name)
fn_changes: RopeChangeSet = my_move_obj.get_changes(dest_pkg)
if my_changes is None:
my_changes = fn_changes
else:
my_changes.add_change(fn_changes)
my_changes_description = my_changes.get_description()
validate_src_res = pythonrc_project.validate(pythonrc_module.get_resource())
validate_dest_res = pythonrc_project.validate(dest_pkg)
validate_project_res = pythonrc_project.validate(pythonrc_project.root)
if (validate_src_res is not None or
validate_dest_res is not None or
validate_project_res is not None):
raise Exception("validation fail")
##
input('take a moment to view the changes')
pydoc.pager(my_changes_description)
proceed = None
while proceed is None:
proceed_txt = input("perform the move? >[yes/no]>> ").strip()
if proceed_txt not in ['yes', 'no',]:
print("please answer 'yes' or 'no'")
continue
proceed = proceed_txt == 'yes'
if not proceed:
_close()
return
pythonrc_project.do(my_changes)
_close()
return
###
def _mount_unmounted_usb_thumb__freebsd():
raise NotImplementedError()
import subprocess as sp
def _mount_unmounted_usb_thumb():
if (sp.run('uname', stdout=sp.PIPE, check=True).stdout.decode('utf-8').strip()
!=
'FreeBSD'):
raise NotImplementedError()
return _mount_unmounted_usb_thumb__freebsd()
mount_usb = _mount_unmounted_usb_thumb
#########################################
currdefns = {defn.__name__: defn for defn
in [
te.demo_edit_text,
osm.plot_osm_demo,
mount_usb,
pastebin.pastebin_app,
juke_web.flashcard_app,
]}
for varname in [
'mv_fn',
]:
pass
# todo: use `del` to unclutter locals()
#########################################
def thread_loop_demo():
    from unittest.mock import MagicMock
    mock_op = MagicMock()
mock_op.side_effect = lambda _: random.random() > 0.5
def thread_entry(thread_queue):
pass
thread_queue = queue.Queue()
loop_thread = threading.Thread(target=thread_entry, args=(thread_queue,), daemon=True)
loop_thread.start()
for idx in range(10):
thread_queue.put({"idx": idx})
|
ransomw/dotfiles
|
pythonrc.py
|
Python
|
apache-2.0
| 24,236
|
[
"PyMOL"
] |
3018d4c952d1968c39dd0e5b433ffd4b69d49ff823bad61f8b79cd48f3dc1b36
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Parsers for basis set in the NWChem format
'''
__all__ = ['parse', 'load', 'parse_ecp', 'load_ecp',
'convert_basis_to_nwchem', 'convert_ecp_to_nwchem',
'optimize_contraction', 'remove_zero', 'to_general_contraction']
import re
import numpy
import numpy as np
import scipy.linalg
from pyscf.data.elements import _std_symbol
from pyscf.lib.exceptions import BasisNotFoundError
from pyscf import __config__
DISABLE_EVAL = getattr(__config__, 'DISABLE_EVAL', False)
MAXL = 15
SPDF = 'SPDFGHIKLMNORTU'
MAPSPDF = {key: l for l, key in enumerate(SPDF)}
BASIS_SET_DELIMITER = re.compile('# *BASIS SET.*\n|END\n')
ECP_DELIMITER = re.compile('\n *ECP *\n')
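# These delimiters split a basis library file into per-element basis blocks
# and the trailing ECP section, respectively.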
def parse(string, symb=None, optimize=True):
'''Parse the basis text which is in NWChem format. Return an internal
basis format which can be assigned to attribute :attr:`Mole.basis`
    Empty lines, lines starting with '#', and the "BASIS SET" and "END"
    markers are ignored.
    Args:
        string : A string in NWChem basis format. Empty lines and the lines of
            "BASIS SET" and "END" will be ignored
Kwargs:
optimize : Optimize basis contraction. Convert the segment contracted
basis to the general contracted basis.
Examples:
>>> mol = gto.Mole()
>>> mol.basis = {'O': gto.basis.parse("""
... #BASIS SET: (6s,3p) -> [2s,1p]
... C S
... 71.6168370 0.15432897
... 13.0450960 0.53532814
... 3.5305122 0.44463454
... C SP
... 2.9412494 -0.09996723 0.15591627
... 0.6834831 0.39951283 0.60768372
... 0.2222899 0.70011547 0.39195739
... """)}
>>> gto.basis.parse("""
... He S
... 13.6267000 0.1752300
... 1.9993500 0.8934830
... 0.3829930 0.0000000
... He S
... 13.6267000 0.0000000
... 1.9993500 0.0000000
... 0.3829930 1.0000000
... """, optimize=True)
[[0, [13.6267, 0.17523, 0.0], [1.99935, 0.893483, 0.0], [0.382993, 0.0, 1.0]]]
'''
if symb is not None:
symb = _std_symbol(symb)
string = _search_basis_block(re.split(BASIS_SET_DELIMITER, string), symb)
if not string:
raise BasisNotFoundError('Basis not found for %s' % symb)
raw_basis = []
for dat in string.splitlines():
dat = dat.split('#')[0].strip() # Use # to start comments
dat_upper = dat.upper()
if (dat and not dat_upper.startswith('END') and not dat_upper.startswith('BASIS')):
raw_basis.append(dat)
return _parse(raw_basis, optimize)
def load(basisfile, symb, optimize=True):
raw_basis = search_seg(basisfile, symb)
return _parse(raw_basis, optimize)
def _parse(raw_basis, optimize=True):
basis_parsed = [[] for l in range(MAXL)]
key = None
for line in raw_basis:
dat = line.strip()
if not dat or dat.startswith('#'):
continue
elif dat[0].isalpha():
key = dat.split()[1].upper()
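            # An NWChem 'SP' shell lists one exponent column with separate s and p
            # coefficients, so it fills both the l=0 and l=1 blocks at once.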
if key == 'SP':
basis_parsed[0].append([0])
basis_parsed[1].append([1])
else:
l = MAPSPDF[key]
current_basis = [l]
basis_parsed[l].append(current_basis)
else:
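            # Fortran-style 'D' exponents (e.g. 1.0D-03) are rewritten so that
            # Python's float() can parse them.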
dat = dat.replace('D','e').split()
try:
dat = [float(x) for x in dat]
except ValueError:
if DISABLE_EVAL:
raise ValueError('Failed to parse basis %s' % line)
else:
dat = list(eval(','.join(dat)))
except Exception as e:
raise BasisNotFoundError('\n' + str(e) +
                                         '\nor the required basis file does not exist.')
if key is None:
raise RuntimeError('Failed to parse basis')
elif key == 'SP':
basis_parsed[0][-1].append([dat[0], dat[1]])
basis_parsed[1][-1].append([dat[0], dat[2]])
else:
current_basis.append(dat)
basis_sorted = [b for bs in basis_parsed for b in bs]
if optimize:
basis_sorted = optimize_contraction(basis_sorted)
basis_sorted = remove_zero(basis_sorted)
return basis_sorted
def parse_ecp(string, symb=None):
if symb is not None:
symb = _std_symbol(symb)
raw_data = string.splitlines()
for i, dat in enumerate(raw_data):
dat0 = dat.split(None, 1)
if dat0 and dat0[0] == symb:
break
if i+1 == len(raw_data):
raise BasisNotFoundError('ECP not found for %s' % symb)
seg = []
for dat in raw_data[i:]:
dat = dat.strip()
if dat: # remove empty lines
if ((dat[0].isalpha() and dat.split(None, 1)[0].upper() != symb.upper())):
break
else:
seg.append(dat)
else:
seg = string.splitlines()
ecptxt = []
for dat in seg:
dat = dat.split('#')[0].strip()
dat_upper = dat.upper()
if (dat and not dat_upper.startswith('END') and not dat_upper.startswith('ECP')):
ecptxt.append(dat)
return _parse_ecp(ecptxt)
def _parse_ecp(raw_ecp):
ecp_add = []
nelec = None
for line in raw_ecp:
dat = line.strip()
if not dat or dat.startswith('#'): # comment line
continue
elif dat[0].isalpha():
key = dat.split()[1].upper()
if key == 'NELEC':
nelec = int(dat.split()[2])
continue
elif key == 'UL':
ecp_add.append([-1])
else:
ecp_add.append([MAPSPDF[key]])
# up to r^6
by_ang = [[] for i in range(7)]
ecp_add[-1].append(by_ang)
else:
line = dat.replace('D','e').split()
l = int(line[0])
try:
coef = [float(x) for x in line[1:]]
except ValueError:
if DISABLE_EVAL:
raise ValueError('Failed to parse ecp %s' % line)
else:
coef = list(eval(','.join(line[1:])))
by_ang[l].append(coef)
if nelec is None:
return []
else:
bsort = []
for l in range(-1, MAXL):
bsort.extend([b for b in ecp_add if b[0] == l])
return [nelec, bsort]
def load_ecp(basisfile, symb):
return _parse_ecp(search_ecp(basisfile, symb))
def search_seg(basisfile, symb):
symb = _std_symbol(symb)
with open(basisfile, 'r') as fin:
fdata = re.split(BASIS_SET_DELIMITER, fin.read())
raw_basis = _search_basis_block(fdata, symb)
return [x for x in raw_basis.splitlines() if x and 'END' not in x]
def _search_basis_block(raw_data, symb):
raw_basis = ''
for dat in raw_data:
dat0 = dat.split(None, 1)
if dat0 and dat0[0] == symb:
raw_basis = dat
break
return raw_basis
def search_ecp(basisfile, symb):
symb = _std_symbol(symb)
with open(basisfile, 'r') as fin:
fdata = re.split(ECP_DELIMITER, fin.read())
if len(fdata) <= 1:
return []
fdata = fdata[1].splitlines()
for i, dat in enumerate(fdata):
dat0 = dat.split(None, 1)
if dat0 and dat0[0] == symb:
break
seg = []
for dat in fdata[i:]:
dat = dat.strip()
if dat: # remove empty lines
if ((dat[0].isalpha() and dat.split(None, 1)[0].upper() != symb.upper())):
return seg
else:
seg.append(dat)
return []
def convert_basis_to_nwchem(symb, basis):
'''Convert the internal basis format to NWChem format string'''
res = []
symb = _std_symbol(symb)
# pass 1: comment line
ls = [b[0] for b in basis]
nprims = [len(b[1:]) for b in basis]
nctrs = [len(b[1])-1 for b in basis]
prim_to_ctr = {}
for i, l in enumerate(ls):
if l in prim_to_ctr:
prim_to_ctr[l][0] += nprims[i]
prim_to_ctr[l][1] += nctrs[i]
else:
prim_to_ctr[l] = [nprims[i], nctrs[i]]
nprims = []
nctrs = []
for l in set(ls):
nprims.append(str(prim_to_ctr[l][0])+SPDF[l].lower())
nctrs.append(str(prim_to_ctr[l][1])+SPDF[l].lower())
res.append('#BASIS SET: (%s) -> [%s]' % (','.join(nprims), ','.join(nctrs)))
# pass 2: basis data
for bas in basis:
res.append('%-2s %s' % (symb, SPDF[bas[0]]))
for dat in bas[1:]:
res.append(' '.join('%15.9f'%x for x in dat))
return '\n'.join(res)
def convert_ecp_to_nwchem(symb, ecp):
'''Convert the internal ecp format to NWChem format string'''
symb = _std_symbol(symb)
res = ['%-2s nelec %d' % (symb, ecp[0])]
for ecp_block in ecp[1]:
l = ecp_block[0]
if l == -1:
res.append('%-2s ul' % symb)
else:
res.append('%-2s %s' % (symb, SPDF[l].lower()))
for r_order, dat in enumerate(ecp_block[1]):
for e,c in dat:
res.append('%d %15.9f %15.9f' % (r_order, e, c))
return '\n'.join(res)
def optimize_contraction(basis):
    '''Search for basis segments that share the same exponents and merge them
    into general contracted sets.
    Note the difference to the function :func:`to_general_contraction`: the
    return value of this function may still contain multiple segments for each
    angular momentum section.
'''
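    # Sketch of the merge for two s segments sharing exponents e1, e2:
    #   [[0, [e1, c1], [e2, c2]], [0, [e1, c3], [e2, c4]]]
    #     -> [[0, [e1, c1, c3], [e2, c2, c4]]]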
basdic = {}
for b in basis:
if isinstance(b[1], int): # kappa = b[1]
key = tuple(b[:2])
ec = numpy.array(b[2:]).T
else:
key = tuple(b[:1])
ec = numpy.array(b[1:]).T
es = ec[0]
cs = [c for c in ec[1:]]
if key not in basdic:
basdic[key] = []
if basdic[key]:
for e_cs in basdic[key]:
if numpy.array_equal(e_cs[0], es):
e_cs.extend(cs)
break
else:
basdic[key].append([es] + cs)
else:
basdic[key].append([es] + cs)
basis = []
for key in sorted(basdic.keys()):
l_kappa = list(key)
for e_cs in basdic[key]:
b = l_kappa + numpy.array(e_cs).T.tolist()
basis.append(b)
return basis
def to_general_contraction(basis):
'''Segmented contracted basis -> general contracted basis.
    Combine multiple basis segments into one segment for each angular momentum
    section.
Examples:
>>> gto.contract(gto.uncontract(gto.load('sto3g', 'He')))
[[0, [6.36242139, 1.0, 0.0, 0.0], [1.158923, 0.0, 1.0, 0.0], [0.31364979, 0.0, 0.0, 1.0]]]
'''
basdic = {}
for b in basis:
if isinstance(b[1], int): # kappa = b[1]
key = tuple(b[:2])
ec = numpy.array(b[2:])
else:
key = tuple(b[:1])
ec = numpy.array(b[1:])
if key in basdic:
basdic[key].append(ec)
else:
basdic[key] = [ec]
basis = []
for key in sorted(basdic.keys()):
l_kappa = list(key)
es = numpy.hstack([ec[:,0] for ec in basdic[key]])
cs = scipy.linalg.block_diag(*[ec[:,1:] for ec in basdic[key]])
es, e_idx, rev_idx = numpy.unique(es.round(9), True, True)
es = es[::-1] # sort the exponents from large to small
bcoeff = numpy.zeros((e_idx.size, cs.shape[1]))
for i, j in enumerate(rev_idx):
bcoeff[j] += cs[i]
bcoeff = bcoeff[::-1]
ec = numpy.hstack((es[:,None], bcoeff))
basis.append(l_kappa + ec.tolist())
return basis
def remove_zero(basis):
'''
Remove exponents if their contraction coefficients are all zeros.
'''
new_basis = []
for b in basis:
if isinstance(b[1], int): # kappa = b[1]
key = list(b[:2])
ec = b[2:]
else:
key = list(b[:1])
ec = b[1:]
new_ec = [e_c for e_c in ec if any(c!=0 for c in e_c[1:])]
if new_ec:
new_basis.append(key + new_ec)
return new_basis
if __name__ == '__main__':
from pyscf import gto
mol = gto.M(atom='O', basis='6-31g')
print(load_ecp('lanl2dz.dat', 'Na'))
b = load('ano.dat', 'Na')
print(convert_basis_to_nwchem('Na', b))
b = load_ecp('lanl2dz.dat', 'Na')
print(convert_ecp_to_nwchem('Na', b))
|
sunqm/pyscf
|
pyscf/gto/basis/parse_nwchem.py
|
Python
|
apache-2.0
| 13,409
|
[
"NWChem",
"PySCF"
] |
3ef7f56b8c9a0ad0f034ec073ec54e18a3bcada9b68e88975d02e978110eb578
|
########################################################################
# $HeadURL$
########################################################################
""" NotificationDB class is a front-end to the Notifications database
"""
__RCSID__ = "$Id$"
import time
import types
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Mail import Mail
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Security import CS
class NotificationDB( DB ):
def __init__( self, maxQueueSize = 10 ):
DB.__init__( self, 'NotificationDB', 'Framework/NotificationDB', maxQueueSize )
result = self.__initializeDB()
if not result[ 'OK' ]:
self.log.fatal( "Cannot initialize DB!", result[ 'Message' ] )
self.__alarmQueryFields = [ 'alarmid', 'author', 'creationtime', 'modtime', 'subject',
'status', 'priority', 'notifications', 'body', 'assignee', 'alarmkey' ]
self.__alarmLogFields = [ 'timestamp', 'author', 'comment', 'modifications' ]
self.__notificationQueryFields = ( 'id', 'user', 'seen', 'message', 'timestamp' )
self.__newAlarmMandatoryFields = [ 'author', 'subject', 'status', 'notifications', 'body', 'assignee', 'priority' ]
self.__updateAlarmIdentificationFields = [ 'id', 'alarmKey' ]
self.__updateAlarmMandatoryFields = [ 'author' ]
self.__updateAlarmAtLeastOneField = [ 'comment', 'modifications' ]
self.__updateAlarmModificableFields = [ 'status', 'assignee', 'priority' ]
self.__validAlarmStatus = [ 'Open', 'OnGoing', 'Closed', 'Testing' ]
self.__validAlarmNotifications = [ 'Web', 'Mail', 'SMS' ]
self.__validAlarmPriorities = [ 'Low', 'Medium', 'High', 'Extreme' ]
def __initializeDB( self ):
retVal = self._query( "show tables" )
if not retVal[ 'OK' ]:
return retVal
tablesInDB = [ t[0] for t in retVal[ 'Value' ] ]
tablesToCreate = {}
if 'ntf_Alarms' not in tablesInDB:
tablesToCreate[ 'ntf_Alarms' ] = { 'Fields' : { 'AlarmId' : 'INTEGER UNSIGNED AUTO_INCREMENT NOT NULL',
'AlarmKey' : 'VARCHAR(32) NOT NULL',
'Author' : 'VARCHAR(64) NOT NULL',
'CreationTime' : 'DATETIME NOT NULL',
'ModTime' : 'DATETIME NOT NULL',
'Subject' : 'VARCHAR(255) NOT NULL',
'Status' : 'VARCHAR(64) NOT NULL',
'Priority' : 'VARCHAR(32) NOT NULL',
'Body' : 'BLOB',
'Assignee' : 'VARCHAR(64) NOT NULL',
'Notifications' : 'VARCHAR(128) NOT NULL'
},
'PrimaryKey' : 'AlarmId',
'Indexes' : { 'Status' : [ 'Status' ],
'Assignee' : [ 'Assignee' ] }
}
if 'ntf_AssigneeGroups' not in tablesInDB:
tablesToCreate[ 'ntf_AssigneeGroups' ] = { 'Fields' : { 'AssigneeGroup' : 'VARCHAR(64) NOT NULL',
'User' : 'VARCHAR(64) NOT NULL',
},
'Indexes' : { 'ag' : [ 'AssigneeGroup' ] }
}
if 'ntf_AlarmLog' not in tablesInDB:
tablesToCreate[ 'ntf_AlarmLog' ] = { 'Fields' : { 'AlarmId' : 'INTEGER UNSIGNED NOT NULL',
'Timestamp' : 'DATETIME NOT NULL',
'Author' : 'VARCHAR(64) NOT NULL',
'Comment' : 'BLOB',
'Modifications' : 'VARCHAR(255)',
},
'Indexes' : { 'AlarmID' : [ 'AlarmId' ] }
}
if 'ntf_AlarmFollowers' not in tablesInDB:
tablesToCreate[ 'ntf_AlarmFollowers' ] = { 'Fields' : { 'AlarmId' : 'INTEGER UNSIGNED NOT NULL',
'User' : 'VARCHAR(64) NOT NULL',
'Mail' : 'TINYINT(1) DEFAULT 0',
'Notification' : 'TINYINT(1) DEFAULT 1',
'SMS' : 'TINYINT(1) DEFAULT 0',
},
'Indexes' : { 'AlarmID' : [ 'AlarmId' ] }
}
if 'ntf_Notifications' not in tablesInDB:
tablesToCreate[ 'ntf_Notifications' ] = { 'Fields' : { 'Id' : 'INTEGER UNSIGNED AUTO_INCREMENT NOT NULL',
'User' : 'VARCHAR(64) NOT NULL',
'Message' : 'BLOB NOT NULL',
'Seen' : 'TINYINT(1) NOT NULL DEFAULT 0',
'Expiration' : 'DATETIME',
'Timestamp' : 'DATETIME',
'DeferToMail' : 'TINYINT(1) NOT NULL DEFAULT 1',
},
'PrimaryKey' : 'Id',
}
if tablesToCreate:
result = self._createTables( tablesToCreate )
if result['OK'] and result['Value']:
self.log.info( "NotificationDB: created tables %s" % result['Value'] )
return result
return S_OK()
def __checkAlarmField( self, name, value ):
name = name.lower()
if name == 'status':
if value not in self.__validAlarmStatus:
return S_ERROR( "Status %s is invalid. Valid ones are: %s" % ( value, self.__validAlarmStatus ) )
elif name == 'priority':
if value not in self.__validAlarmPriorities:
return S_ERROR( "Type %s is invalid. Valid ones are: %s" % ( value, self.__validAlarmPriorities ) )
elif name == 'assignee':
result = self.getUserAsignees( value )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
return S_ERROR( "%s is not a known assignee" % value )
return result
return S_OK()
def newAlarm( self, alarmDef ):
""" Create a new alarm record
"""
followers = ""
for field in self.__newAlarmMandatoryFields:
if field not in alarmDef:
return S_ERROR( "Oops. Missing %s" % field )
result = self.__checkAlarmField( field, alarmDef[ field ] )
if not result[ 'OK' ]:
return result
if field == 'assignee':
followers = result[ 'Value' ]
author = alarmDef[ 'author' ]
if author not in followers:
followers.append( author )
sqlFieldsName = []
sqlFieldsValue = []
for field in self.__newAlarmMandatoryFields:
if field == 'notifications':
notifications = {}
for type in self.__validAlarmNotifications:
if type in alarmDef[ field ]:
notifications[ type ] = 1
else:
notifications[ type ] = 0
val = DEncode.encode( notifications )
else:
val = alarmDef[ field ]
#Add to the list of fields to add
sqlFieldsName.append( field )
result = self._escapeString( val )
if result['OK']:
sqlFieldsValue.append( result['Value'] )
else:
return S_ERROR( 'Failed to escape value %s' % val )
sqlFieldsName.extend( [ 'CreationTime', 'ModTime' ] )
sqlFieldsValue.extend( [ 'UTC_TIMESTAMP()', 'UTC_TIMESTAMP()' ] )
#Get the defined alarmkey and generate a random one if not defined
if 'alarmKey' in alarmDef:
result = self._escapeString( alarmDef[ 'alarmKey' ] )
if result['OK']:
alarmKey = result['Value']
else:
        return S_ERROR( 'Failed to escape value %s for key AlarmKey' % alarmDef[ 'alarmKey' ] )
gLogger.info( "Checking there are no alarms with key %s" % alarmKey )
result = self._query( "SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmKey=%s" % alarmKey )
if not result[ 'OK' ]:
return result
if result[ 'Value' ]:
return S_ERROR( "Oops, alarm with id %s has the same alarm key!" % result[ 'Value' ][0][0] )
else:
alarmKey = str( time.time() )[-31:]
sqlFieldsName.append( 'AlarmKey' )
sqlFieldsValue.append( alarmKey )
sqlInsert = "INSERT INTO `ntf_Alarms` (%s) VALUES (%s)" % ( ",".join( sqlFieldsName ),
",".join( sqlFieldsValue ) )
result = self._update( sqlInsert )
if not result['OK']:
return result
alarmId = result[ 'lastRowId' ]
for follower in followers:
result = self.modifyFollowerForAlarm( alarmId, follower, notifications )
if not result[ 'OK' ]:
varMsg = "\nFollower: %s\nAlarm: %s\nError: %s" % ( follower, alarmId, result['Message'] )
self.log.error( "Couldn't set follower for alarm", varMsg )
self.__notifyAlarm( alarmId )
return S_OK( alarmId )
def deleteAlarmsByAlarmKey( self, alarmKeyList ):
alarmsIdList = []
for alarmKey in alarmKeyList:
result = self.__getAlarmIdFromKey( alarmKey )
if not result[ 'OK' ]:
return result
alarmId = result[ 'Value' ]
alarmsIdList.append( alarmId )
self.log.info( "Trying to delete alarms with:\n alamKey %s\n alarmId %s" % ( alarmKeyList, alarmsIdList ) )
return self.deleteAlarmsByAlarmId( alarmsIdList )
def deleteAlarmsByAlarmId( self, alarmIdList ):
self.log.info( "Trying to delete alarms with ids %s" % alarmIdList )
try:
alarmId = int( alarmIdList )
alarmIdList = [ alarmId ]
except:
pass
try:
alarmIdList = [ int( alarmId ) for alarmId in alarmIdList ]
except:
self.log.error( "At least one alarmId is not a number", str( alarmIdList ) )
return S_ERROR( "At least one alarmId is not a number: %s" % str( alarmIdList ) )
tablesToCheck = ( "ntf_AlarmLog", "ntf_AlarmFollowers", "ntf_Alarms" )
alamsSQLList = ",".join( [ "%d" % alarmId for alarmId in alarmIdList ] )
for tableName in tablesToCheck:
delSql = "DELETE FROM `%s` WHERE AlarmId in ( %s )" % ( tableName, alamsSQLList )
result = self._update( delSql )
if not result[ 'OK' ]:
self.log.error( "Could not delete alarm", "from table %s: %s" % ( tableName, result[ 'Message' ] ) )
return S_OK()
def __processUpdateAlarmModifications( self, modifications ):
if type( modifications ) != types.DictType:
return S_ERROR( "Modifications must be a dictionary" )
updateFields = []
followers = []
for field in modifications:
if field not in self.__updateAlarmModificableFields:
return S_ERROR( "%s is not a valid modificable field" % field )
value = modifications[ field ]
result = self.__checkAlarmField( field , value )
if not result[ 'OK' ]:
return result
if field == 'assignee':
followers = result[ 'Value' ]
result = self._escapeString( modifications[ field ] )
if not result[ 'OK' ]:
return result
updateFields.append( "%s=%s" % ( field, result[ 'Value' ] ) )
return S_OK( ( ", ".join( updateFields ), DEncode.encode( modifications ), followers ) )
def __getAlarmIdFromKey( self, alarmKey ):
result = self._escapeString( alarmKey )
if not result[ 'OK' ]:
return S_ERROR( "Cannot escape alarmKey %s" % alarmKey )
alarmKey = result[ 'Value' ]
sqlQuery = "SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmKey=%s" % alarmKey
result = self._query( sqlQuery )
if result[ 'OK' ]:
result[ 'Value' ] = result[ 'Value' ][0][0]
return result
def updateAlarm( self, updateReq ):
#Discover alarm identification
idOK = False
for field in self.__updateAlarmIdentificationFields:
if field in updateReq:
idOK = True
if not idOK:
return S_ERROR( "Need at least one field to identify which alarm to update! %s" % self.__updateAlarmIdentificationFields )
if 'alarmKey' in updateReq:
alarmKey = updateReq[ 'alarmKey' ]
result = self.__getAlarmIdFromKey( alarmKey )
if not result[ 'OK' ]:
self.log.error( "Could not get alarm id for key", " %s: %s" % ( alarmKey, result[ 'Value' ] ) )
return result
updateReq[ 'id' ] = result[ 'Value' ]
self.log.info( "Retrieving alarm key %s maps to id %s" % ( alarmKey, updateReq[ 'id' ] ) )
#Check fields
for field in self.__updateAlarmMandatoryFields:
if field not in updateReq:
return S_ERROR( "Oops. Missing %s" % field )
validReq = False
for field in self.__updateAlarmAtLeastOneField:
if field in updateReq:
validReq = True
if not validReq:
return S_OK( "Requirement needs at least one of %s" % " ".join( self.__updateAlarmAtLeastOneField ) )
author = updateReq[ 'author' ]
followers = [ author ]
if author not in CS.getAllUsers():
return S_ERROR( "%s is not a known user" % author )
result = self._escapeString( author )
if not result[ 'OK' ]:
return result
author = result[ 'Value' ]
try:
alarmId = int( updateReq[ 'id' ] )
except:
return S_ERROR( "Oops, Alarm id is not valid! (bad boy...)" )
result = self._query( "SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmId=%d" % alarmId )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
return S_ERROR( "Alarm %s does not exist!" % alarmId )
sqlFields = [ 'AlarmId', 'Author', 'Timestamp' ]
sqlValues = [ "%d" % alarmId, author, 'UTC_TIMESTAMP()' ]
rawComment = ""
if 'comment' in updateReq:
rawComment = updateReq[ 'comment' ]
result = self._escapeString( rawComment )
if not result[ 'OK' ]:
return result
sqlFields.append( "Comment" )
sqlValues.append( result[ 'Value' ] )
modifications = False
if 'modifications' in updateReq:
modifications = updateReq[ 'modifications' ]
result = self.__processUpdateAlarmModifications( modifications )
if not result[ 'OK' ]:
return result
alarmModsSQL, encodedMods, newFollowers = result[ 'Value' ]
sqlFields.append( "Modifications" )
result = self._escapeString( encodedMods )
if not result[ 'OK' ]:
return result
sqlValues.append( result[ 'Value' ] )
if newFollowers:
followers.extend( newFollowers )
logSQL = "INSERT INTO `ntf_AlarmLog` (%s) VALUES (%s)" % ( ",".join( sqlFields ), ",".join( sqlValues ) )
result = self._update( logSQL )
if not result[ 'OK' ]:
return result
modSQL = "ModTime=UTC_TIMESTAMP()"
if modifications:
modSQL = "%s, %s" % ( modSQL, alarmModsSQL )
updateSQL = "UPDATE `ntf_Alarms` SET %s WHERE AlarmId=%d" % ( modSQL, alarmId )
result = self._update( updateSQL )
if not result[ 'OK' ]:
return result
#Get notifications config
sqlQuery = "SELECT Notifications FROM `ntf_Alarms` WHERE AlarmId=%s" % alarmId
result = self._query( sqlQuery )
if not result[ 'OK' ] or not result[ 'Value' ]:
self.log.error( "Could not retrieve default notifications for alarm", "%s" % alarmId )
return S_OK( alarmId )
notificationsDict = DEncode.decode( result[ 'Value' ][0][0] )[0]
for v in self.__validAlarmNotifications:
if v not in notificationsDict:
notificationsDict[ v ] = 0
for follower in followers:
result = self.modifyFollowerForAlarm( alarmId, follower, notificationsDict, overwrite = False )
if not result[ 'OK' ]:
varMsg = "\nFollower: %s\nAlarm: %s\nError: %s" % ( follower, alarmId, result['Message'] )
self.log.error( "Couldn't set follower for alarm", varMsg )
return self.__notifyAlarm( alarmId )
def __notifyAlarm( self, alarmId ):
result = self.getSubscribersForAlarm( alarmId )
if not result[ 'OK' ]:
return result
subscribers = result[ 'Value' ]
needLongText = False
if subscribers[ 'mail' ]:
needLongText = True
result = self.getAlarmInfo( alarmId )
if not result[ 'OK' ]:
return result
alarmInfo = result[ 'Value' ]
result = self.getAlarmLog( alarmId )
if not result[ 'OK' ]:
return result
alarmLog = result[ 'Value' ]
if subscribers[ 'notification' ]:
msg = self.__generateAlarmInfoMessage( alarmInfo )
logMsg = self.__generateAlarmLogMessage( alarmLog, True )
if logMsg:
msg = "%s\n\n%s\nLast modification:\n%s" % ( msg, "*"*30, logMsg )
for user in subscribers[ 'notification' ]:
self.addNotificationForUser( user, msg, 86400, deferToMail = True )
if subscribers[ 'mail' ]:
msg = self.__generateAlarmInfoMessage( alarmInfo )
logMsg = self.__generateAlarmLogMessage( alarmLog )
if logMsg:
msg = "%s\n\n%s\nAlarm Log:\n%s" % ( msg, "*"*30, logMsg )
subject = "Update on alarm %s" % alarmId
else:
subject = "New alarm %s" % alarmId
for user in subscribers[ 'mail' ]:
self.__sendMailToUser( user, subject, msg )
if subscribers[ 'sms' ]:
#TODO
pass
return S_OK()
def __generateAlarmLogMessage( self, alarmLog, showOnlyLast = False ):
if len( alarmLog[ 'Records' ] ) == 0:
return ""
records = alarmLog[ 'Records' ]
if showOnlyLast:
logToShow = [-1]
else:
logToShow = range( len( records ) - 1, -1, -1 )
finalMessage = []
for id in logToShow:
rec = records[ id ]
data = {}
for i in range( len( alarmLog[ 'ParameterNames' ] ) ):
if rec[i]:
data[ alarmLog[ 'ParameterNames' ][i] ] = rec[i]
#[ 'timestamp', 'author', 'comment', 'modifications' ]
msg = [ " Entry by : %s" % data[ 'author' ] ]
msg.append( " On : %s" % data[ 'timestamp' ].strftime( "%Y/%m/%d %H:%M:%S" ) )
if 'modifications' in data:
mods = data[ 'modifications' ]
keys = mods.keys()
keys.sort()
msg.append( " Modificaitons:" )
for key in keys:
msg.append( " %s -> %s" % ( key, mods[ key ] ) )
if 'comment' in data:
msg.append( " Comment:\n\n%s" % data[ 'comment' ] )
finalMessage.append( "\n".join( msg ) )
return "\n\n===============\n".join( finalMessage )
def __generateAlarmInfoMessage( self, alarmInfo ):
    #[ 'alarmid', 'author', 'creationtime', 'modtime', 'subject', 'status', 'priority', 'body', 'assignee' ]
msg = " Alarm %6d\n" % alarmInfo[ 'alarmid' ]
msg += " Author : %s\n" % alarmInfo[ 'author' ]
msg += " Subject : %s\n" % alarmInfo[ 'subject' ]
msg += " Status : %s\n" % alarmInfo[ 'status' ]
msg += " Priority : %s\n" % alarmInfo[ 'priority' ]
msg += " Assignee : %s\n" % alarmInfo[ 'assignee' ]
msg += " Creation date : %s UTC\n" % alarmInfo[ 'creationtime' ].strftime( "%Y/%m/%d %H:%M:%S" )
msg += " Last modificaiton : %s UTC\n" % alarmInfo[ 'modtime' ].strftime( "%Y/%m/%d %H:%M:%S" )
msg += " Body:\n\n%s" % alarmInfo[ 'body' ]
return msg
def __sendMailToUser( self, user, subject, message ):
address = gConfig.getValue( "/Registry/Users/%s/Email" % user, "" )
if not address:
self.log.error( "User does not have an email registered", user )
return S_ERROR( "User %s does not have an email registered" % user )
self.log.info( "Sending mail (%s) to user %s at %s" % ( subject, user, address ) )
m = Mail()
m._subject = "[DIRAC] %s" % subject
m._message = message
m._mailAddress = address
result = m._send()
if not result['OK']:
gLogger.warn( 'Could not send mail with the following message:\n%s' % result['Message'] )
return result
def getAlarms( self, condDict = {}, sortList = False, start = 0, limit = 0, modifiedAfter = False ):
condSQL = []
for field in self.__alarmQueryFields:
if field in condDict:
fieldValues = []
rawValue = condDict[ field ]
if field == 'assignee':
expandedValue = []
for user in rawValue:
result = self.getAssigneeGroupsForUser( user )
if not result[ 'OK' ]:
return result
for ag in result[ 'Value' ]:
if ag not in expandedValue:
expandedValue.append( ag )
rawValue = expandedValue
for value in rawValue:
result = self._escapeString( value )
if not result[ 'OK' ]:
return result
fieldValues.append( result[ 'Value' ] )
condSQL.append( "%s in ( %s )" % ( field, ",".join( fieldValues ) ) )
selSQL = "SELECT %s FROM `ntf_Alarms`" % ",".join( self.__alarmQueryFields )
if modifiedAfter:
condSQL.append( "ModTime >= %s" % modifiedAfter.strftime( "%Y-%m-%d %H:%M:%S" ) )
if condSQL:
selSQL = "%s WHERE %s" % ( selSQL, " AND ".join( condSQL ) )
if sortList:
selSQL += " ORDER BY %s" % ", ".join( [ "%s %s" % ( sort[0], sort[1] ) for sort in sortList ] )
if limit:
selSQL += " LIMIT %d,%d" % ( start, limit )
result = self._query( selSQL )
if not result['OK']:
return result
resultDict = {}
resultDict['ParameterNames'] = self.__alarmQueryFields
resultDict['Records'] = [ list( v ) for v in result['Value'] ]
return S_OK( resultDict )
def getAlarmInfo( self, alarmId ):
result = self.getAlarms( { 'alarmId' : alarmId } )
if not result[ 'OK' ]:
return result
alarmInfo = {}
data = result[ 'Value' ]
if len( data[ 'Records' ] ) == 0:
return S_OK( {} )
for i in range( len( data[ 'ParameterNames' ] ) ):
alarmInfo[ data[ 'ParameterNames' ][i] ] = data[ 'Records' ][0][i]
return S_OK( alarmInfo )
def getAlarmLog( self, alarmId ):
try:
alarmId = int( alarmId )
except:
return S_ERROR( "Alarm id must be a non decimal number" )
sqlSel = "SELECT %s FROM `ntf_AlarmLog` WHERE AlarmId=%d ORDER BY Timestamp ASC" % ( ",".join( self.__alarmLogFields ),
alarmId )
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
decodedRows = []
for row in result[ 'Value' ]:
      decodedRows.append( list( row ) )
      if not row[3]:
        continue
dec = DEncode.decode( row[ 3 ] )
decodedRows[-1][3] = dec[0]
resultDict = {}
resultDict['ParameterNames'] = self.__alarmLogFields
resultDict['Records'] = decodedRows
return S_OK( resultDict )
###
# Followers management
###
def modifyFollowerForAlarm( self, alarmId, user, notificationsDict, overwrite = True ):
rawUser = user
if rawUser not in CS.getAllUsers():
return S_OK()
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
subscriber = False
for k in notificationsDict:
if notificationsDict[ k ]:
subscriber = True
break
selSQL = "SELECT Notification, Mail, SMS FROM `ntf_AlarmFollowers` WHERE AlarmId=%d AND User=%s" % ( alarmId, user )
result = self._query( selSQL )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
if not subscriber:
return S_OK()
sqlValues = [ "%d" % alarmId, user ]
for k in self.__validAlarmNotifications:
if notificationsDict[ k ]:
sqlValues.append( "1" )
else:
sqlValues.append( "0" )
inSQL = "INSERT INTO `ntf_AlarmFollowers` ( AlarmId, User, Notification, Mail, SMS ) VALUES (%s)" % ",".join( sqlValues )
return self._update( inSQL )
sqlCond = "AlarmId=%d AND User=%s" % ( alarmId, user )
#Need to delete
if not subscriber:
return self._update( "DELETE FROM `ntf_AlarmFollowers` WHERE %s" % sqlCond )
if not overwrite:
return S_OK()
#Need to update
modSQL = []
for k in self.__validAlarmNotifications:
if notificationsDict[ k ]:
modSQL.append( "%s=1" % k )
else:
modSQL.append( "%s=0" % k )
return self._update( "UPDATE `ntf_AlarmFollowers` SET %s WHERE %s" % ( modSQL, sqlCond ) )
def getSubscribersForAlarm( self, alarmId ):
selSQL = "SELECT User, Mail, Notification, SMS FROM `ntf_AlarmFollowers` WHERE AlarmId=%d" % alarmId
result = self._query( selSQL )
if not result[ 'OK' ]:
return result
fw = result[ 'Value' ]
followWays = { 'mail' : [], 'notification' : [], 'sms' : [] }
followers = []
for user, mail, Notification, SMS in fw:
if user in followers:
continue
followers.append( user )
if mail:
followWays[ 'mail' ].append( user )
if Notification:
followWays[ 'notification' ].append( user )
if SMS:
followWays[ 'sms' ].append( user )
return S_OK( followWays )
###
# Assignee groups management
###
def getUserAsignees( self, assignee ):
#Check if it is a user
if assignee in CS.getAllUsers():
return S_OK( [ assignee ] )
result = self._escapeString( assignee )
if not result[ 'OK' ]:
return result
escAG = result[ 'Value' ]
sqlSel = "SELECT User FROM `ntf_AssigneeGroups` WHERE AssigneeGroup = %s" % escAG
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
users = [ row[0] for row in result[ 'Value' ] ]
if not users:
return S_OK( [] )
return S_OK( users )
def setAssigneeGroup( self, groupName, usersList ):
validUsers = CS.getAllUsers()
result = self._escapeString( groupName )
if not result[ 'OK' ]:
return result
escGroup = result[ 'Value' ]
sqlSel = "SELECT User FROM `ntf_AssigneeGroups` WHERE AssigneeGroup = %s" % escGroup
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
currentUsers = [ row[0] for row in result[ 'Value' ] ]
usersToDelete = []
usersToAdd = []
finalUsersInGroup = len( currentUsers )
for user in currentUsers:
if user not in usersList:
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
usersToDelete.append( result[ 'Value' ] )
finalUsersInGroup -= 1
for user in usersList:
if user not in validUsers:
continue
if user not in currentUsers:
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
usersToAdd.append( "( %s, %s )" % ( escGroup, result[ 'Value' ] ) )
finalUsersInGroup += 1
if not finalUsersInGroup:
return S_ERROR( "Group must have at least one user!" )
#Delete old users
if usersToDelete:
sqlDel = "DELETE FROM `ntf_AssigneeGroups` WHERE User in ( %s )" % ",".join( usersToDelete )
result = self._update( sqlDel )
if not result[ 'OK' ]:
return result
#Add new users
if usersToAdd:
sqlInsert = "INSERT INTO `ntf_AssigneeGroups` ( AssigneeGroup, User ) VALUES %s" % ",".join( usersToAdd )
result = self._update( sqlInsert )
if not result[ 'OK' ]:
return result
return S_OK()
def deleteAssigneeGroup( self, groupName ):
result = self._escapeString( groupName )
if not result[ 'OK' ]:
return result
escGroup = result[ 'Value' ]
sqlSel = "SELECT AlarmId FROM `ntf_Alarms` WHERE Assignee=%s" % escGroup
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
if result[ 'Value' ]:
alarmIds = [ row[0] for row in result[ 'Value' ] ]
return S_ERROR( "There are %s alarms assigned to this group" % len( alarmIds ) )
sqlDel = "DELETE FROM `ntf_AssigneeGroups` WHERE AssigneeGroup=%s" % escGroup
return self._update( sqlDel )
def getAssigneeGroups( self ):
result = self._query( "SELECT AssigneeGroup, User from `ntf_AssigneeGroups` ORDER BY User" )
if not result[ 'OK' ]:
return result
agDict = {}
for row in result[ 'Value' ]:
ag = row[0]
user = row[1]
if ag not in agDict:
agDict[ ag ] = []
agDict[ ag ].append( user )
return S_OK( agDict )
def getAssigneeGroupsForUser( self, user ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
result = self._query( "SELECT AssigneeGroup from `ntf_AssigneeGroups` WHERE User=%s" % user )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
###
# Notifications
###
def addNotificationForUser( self, user, message, lifetime = 0, deferToMail = 1 ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
self.log.info( "Adding a notification for user %s (msg is %s chars)" % ( user, len( message ) ) )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
result = self._escapeString( message )
if not result[ 'OK' ]:
return result
message = result[ 'Value' ]
sqlFields = [ 'User', 'Message', 'Timestamp' ]
sqlValues = [ user, message, 'UTC_TIMESTAMP()' ]
if not deferToMail:
sqlFields.append( "DeferToMail" )
sqlValues.append( "0" )
if lifetime:
sqlFields.append( "Expiration" )
sqlValues.append( "TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() )" % int( lifetime ) )
sqlInsert = "INSERT INTO `ntf_Notifications` (%s) VALUES (%s) " % ( ",".join( sqlFields ),
",".join( sqlValues ) )
result = self._update( sqlInsert )
if not result[ 'OK' ]:
return result
return S_OK( result[ 'lastRowId' ] )
def removeNotificationsForUser( self, user, msgIds = False ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
delSQL = "DELETE FROM `ntf_Notifications` WHERE User=%s" % user
escapedIDs = []
if msgIds:
for id in msgIds:
result = self._escapeString( str( id ) )
if not result[ 'OK' ]:
return result
escapedIDs.append( result[ 'Value' ] )
delSQL = "%s AND Id in ( %s ) " % ( delSQL, ",".join( escapedIDs ) )
return self._update( delSQL )
def markNotificationsSeen( self, user, seen = True, msgIds = False ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
if seen:
seen = 1
else:
seen = 0
updateSQL = "UPDATE `ntf_Notifications` SET Seen=%d WHERE User=%s" % ( seen, user )
escapedIDs = []
if msgIds:
for id in msgIds:
result = self._escapeString( str( id ) )
if not result[ 'OK' ]:
return result
escapedIDs.append( result[ 'Value' ] )
updateSQL = "%s AND Id in ( %s ) " % ( updateSQL, ",".join( escapedIDs ) )
return self._update( updateSQL )
def getNotifications( self, condDict = {}, sortList = False, start = 0, limit = 0 ):
condSQL = []
for field in self.__notificationQueryFields:
if field in condDict:
fieldValues = []
for value in condDict[ field ]:
result = self._escapeString( value )
if not result[ 'OK' ]:
return result
fieldValues.append( result[ 'Value' ] )
condSQL.append( "%s in ( %s )" % ( field, ",".join( fieldValues ) ) )
eSortList = []
for field, order in sortList:
if order.lower() in [ 'asc', 'desc' ]:
eSortList.append( ( '`%s`' % field.replace( '`', '' ), order ) )
selSQL = "SELECT %s FROM `ntf_Notifications`" % ",".join( self.__notificationQueryFields )
if condSQL:
selSQL = "%s WHERE %s" % ( selSQL, " AND ".join( condSQL ) )
if eSortList:
selSQL += " ORDER BY %s" % ", ".join( [ "%s %s" % ( sort[0], sort[1] ) for sort in eSortList ] )
else:
selSQL += " ORDER BY Id DESC"
if limit:
selSQL += " LIMIT %d,%d" % ( start, limit )
result = self._query( selSQL )
if not result['OK']:
return result
resultDict = {}
resultDict['ParameterNames'] = self.__notificationQueryFields
resultDict['Records'] = [ list( v ) for v in result['Value'] ]
return S_OK( resultDict )
def purgeExpiredNotifications( self ):
self.log.info( "Purging expired notifications" )
delConds = [ '(Seen=1 OR DeferToMail=0)', '(TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), Expiration ) < 0 )' ]
delSQL = "DELETE FROM `ntf_Notifications` WHERE %s" % " AND ".join( delConds )
result = self._update( delSQL )
if not result[ 'OK' ]:
return result
self.log.info( "Purged %s notifications" % result[ 'Value' ] )
deferCond = [ 'Seen=0', 'DeferToMail=1', 'TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), Expiration ) < 0' ]
selSQL = "SELECT Id, User, Message FROM `ntf_Notifications` WHERE %s" % " AND ".join( deferCond )
result = self._query( selSQL )
if not result[ 'OK' ]:
return result
messages = result[ 'Value' ]
if not messages:
return S_OK()
ids = []
for msg in messages:
      self.__sendMailToUser( msg[1], 'Notification deferred to mail', msg[2] )
ids.append( str( msg[0] ) )
self.log.info( "Deferred %s notifications" % len( ids ) )
return self._update( "DELETE FROM `ntf_Notifications` WHERE Id in (%s)" % ",".join( ids ) )
|
miloszz/DIRAC
|
FrameworkSystem/DB/NotificationDB.py
|
Python
|
gpl-3.0
| 34,669
|
[
"DIRAC"
] |
007a0e3bbb146a26f367aaa32ac0202ccb20c5ac0dc2ab96a74a282cf10ea663
|
# dagutil.py - dag utilities for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullrev
from i18n import _
class basedag(object):
'''generic interface for DAGs
terms:
"ix" (short for index) identifies a nodes internally,
"id" identifies one externally.
All params are ixs unless explicitly suffixed otherwise.
Pluralized params are lists or sets.
'''
def __init__(self):
self._inverse = None
def nodeset(self):
'''set of all node ixs'''
raise NotImplementedError
def heads(self):
'''list of head ixs'''
raise NotImplementedError
def parents(self, ix):
'''list of parents ixs of ix'''
raise NotImplementedError
def inverse(self):
'''inverse DAG, where parents becomes children, etc.'''
raise NotImplementedError
def ancestorset(self, starts, stops=None):
'''
set of all ancestors of starts (incl), but stop walk at stops (excl)
'''
raise NotImplementedError
def descendantset(self, starts, stops=None):
'''
set of all descendants of starts (incl), but stop walk at stops (excl)
'''
return self.inverse().ancestorset(starts, stops)
def headsetofconnecteds(self, ixs):
'''
subset of connected list of ixs so that no node has a descendant in it
By "connected list" we mean that if an ancestor and a descendant are in
the list, then so is at least one path connecting them.
'''
raise NotImplementedError
def externalize(self, ix):
'''return a node id'''
return self._externalize(ix)
def externalizeall(self, ixs):
'''return a list of (or set if given a set) of node ids'''
ids = self._externalizeall(ixs)
if isinstance(ixs, set):
return set(ids)
return list(ids)
def internalize(self, id):
'''return a node ix'''
return self._internalize(id)
def internalizeall(self, ids, filterunknown=False):
'''return a list of (or set if given a set) of node ixs'''
ixs = self._internalizeall(ids, filterunknown)
if isinstance(ids, set):
return set(ixs)
return list(ixs)
class genericdag(basedag):
'''generic implementations for DAGs'''
def ancestorset(self, starts, stops=None):
if stops:
stops = set(stops)
else:
stops = set()
seen = set()
pending = list(starts)
while pending:
n = pending.pop()
if n not in seen and n not in stops:
seen.add(n)
pending.extend(self.parents(n))
return seen
def headsetofconnecteds(self, ixs):
hds = set(ixs)
if not hds:
return hds
for n in ixs:
for p in self.parents(n):
hds.discard(p)
assert hds
return hds
class revlogbaseddag(basedag):
'''generic dag interface to a revlog'''
def __init__(self, revlog, nodeset):
basedag.__init__(self)
self._revlog = revlog
self._heads = None
self._nodeset = nodeset
def nodeset(self):
return self._nodeset
def heads(self):
if self._heads is None:
self._heads = self._getheads()
return self._heads
def _externalize(self, ix):
return self._revlog.index[ix][7]
def _externalizeall(self, ixs):
idx = self._revlog.index
return [idx[i][7] for i in ixs]
def _internalize(self, id):
ix = self._revlog.rev(id)
if ix == nullrev:
raise LookupError(id, self._revlog.indexfile, _('nullid'))
return ix
def _internalizeall(self, ids, filterunknown):
rl = self._revlog
if filterunknown:
return [r for r in map(rl.nodemap.get, ids)
if (r is not None
and r != nullrev
and r not in rl.filteredrevs)]
return map(self._internalize, ids)
class revlogdag(revlogbaseddag):
'''dag interface to a revlog'''
def __init__(self, revlog):
revlogbaseddag.__init__(self, revlog, set(revlog))
def _getheads(self):
return [r for r in self._revlog.headrevs() if r != nullrev]
def parents(self, ix):
rlog = self._revlog
idx = rlog.index
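        # A revlog index entry stores its two parent revisions at positions 5
        # and 6; nullrev marks an absent parent.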
revdata = idx[ix]
prev = revdata[5]
if prev != nullrev:
prev2 = revdata[6]
if prev2 == nullrev:
return [prev]
return [prev, prev2]
prev2 = revdata[6]
if prev2 != nullrev:
return [prev2]
return []
def inverse(self):
if self._inverse is None:
self._inverse = inverserevlogdag(self)
return self._inverse
def ancestorset(self, starts, stops=None):
rlog = self._revlog
idx = rlog.index
if stops:
stops = set(stops)
else:
stops = set()
seen = set()
pending = list(starts)
while pending:
rev = pending.pop()
if rev not in seen and rev not in stops:
seen.add(rev)
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
pending.append(prev)
return seen
def headsetofconnecteds(self, ixs):
if not ixs:
return set()
rlog = self._revlog
idx = rlog.index
headrevs = set(ixs)
for rev in ixs:
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
headrevs.discard(prev)
assert headrevs
return headrevs
def linearize(self, ixs):
'''linearize and topologically sort a list of revisions
The linearization process tries to create long runs of revs where
a child rev comes immediately after its first parent. This is done by
visiting the heads of the given revs in inverse topological order,
and for each visited rev, visiting its second parent, then its first
parent, then adding the rev itself to the output list.
'''
sorted = []
visit = list(self.headsetofconnecteds(ixs))
visit.sort(reverse=True)
finished = set()
while visit:
cur = visit.pop()
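            # Negative entries are markers pushed below: -cur - 1 recovers a rev
            # whose parents have already been scheduled, so it can now be emitted.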
if cur < 0:
cur = -cur - 1
if cur not in finished:
sorted.append(cur)
finished.add(cur)
else:
visit.append(-cur - 1)
visit += [p for p in self.parents(cur)
if p in ixs and p not in finished]
assert len(sorted) == len(ixs)
return sorted
class inverserevlogdag(revlogbaseddag, genericdag):
'''inverse of an existing revlog dag; see revlogdag.inverse()'''
def __init__(self, orig):
revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
self._orig = orig
self._children = {}
self._roots = []
self._walkfrom = len(self._revlog) - 1
def _walkto(self, walkto):
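        # Lazily sweep the revlog from the highest unvisited rev down to
        # `walkto`, recording child edges and roots along the way.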
rev = self._walkfrom
cs = self._children
roots = self._roots
idx = self._revlog.index
while rev >= walkto:
data = idx[rev]
isroot = True
for prev in [data[5], data[6]]: # parent revs
if prev != nullrev:
cs.setdefault(prev, []).append(rev)
isroot = False
if isroot:
roots.append(rev)
rev -= 1
self._walkfrom = rev
def _getheads(self):
self._walkto(nullrev)
return self._roots
def parents(self, ix):
if ix is None:
return []
if ix <= self._walkfrom:
self._walkto(ix)
return self._children.get(ix, [])
def inverse(self):
return self._orig
|
hekra01/mercurial
|
mercurial/dagutil.py
|
Python
|
gpl-2.0
| 8,316
|
[
"VisIt"
] |
94877410d9327a74dd16a9ec3680524f5114d96ccdf55683715c2aee20607004
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
########################################################################
# Solves problem 45 from projectEuler.net.
# Finds the next number after 40755 which is triangular, pentagonal and hexagonal
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
if __name__ == '__main__':
iT = 286
iP = 166
iH = 144
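    # Start just past T(285) = P(165) = H(143) = 40755, the previous number
    # sharing all three properties.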
t = iT * (iT + 1) // 2
p = iP * (3 * iP - 1) // 2
h = iH * (2 * iH - 1)
while h != t or t != p:
h = iH * (2 * iH - 1)
iH += 1
while t < h:
t = iT * (iT + 1) // 2
iT += 1
while p < h:
p = iP * (3 * iP - 1) // 2
iP += 1
print("The result is:", h)
|
salessandri/programming-contests
|
project-euler/problem045.py
|
Python
|
gpl-3.0
| 1,538
|
[
"VisIt"
] |
18b9d1986fdcae0cdef741dbd3b700f2e4ba4466c2984e71f1b08312e3809eaa
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from zoo.tfpark.tf_dataset import TensorMeta
from zoo.util import nest
from zoo import getOrCreateSparkContext, get_node_and_core_number
from zoo.common import callZooFunc
from zoo.feature.common import FeatureSet
from zoo.orca.data import SparkXShards
from zoo.tfpark import TFDataset
class TFDataDataset2(TFDataset):
def __init__(self, dataset, batch_size,
batch_per_thread,
validation_dataset=None, intra_threads=None, inter_threads=None):
node_num, core_num = get_node_and_core_number()
self.intra_threads = intra_threads
self.inter_threads = inter_threads
if intra_threads is None:
self.intra_threads = core_num
if inter_threads is None:
self.inter_threads = 1
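        # A positive batch_size is treated as a global batch: it is split evenly
        # across the cluster, each shard consuming batch_size // node_num records.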
if batch_size > 0:
num_parts = dataset.xshards.num_partitions()
if num_parts != node_num:
dataset.xshards = dataset.xshards.repartition(node_num)
assert batch_size % node_num == 0, \
"batch_size should be a multiple of num_shards, got" \
" batch_size {}, node_num {}".format(batch_size, node_num)
batch_per_shard = batch_size // node_num
self.drop_remainder = True
elif batch_per_thread > 0:
batch_per_shard = batch_per_thread
self.drop_remainder = False
else:
raise ValueError("one of batch_size or batch_per_thread must be larger than 0")
self.rdd = dataset.as_graph_rdd(batch_per_shard,
drop_remainder=self.drop_remainder).cache()
meta_info = self.rdd.map(lambda x: x[1]).first()
tensor_structure = meta_info["tensor_structure"]
self.init_op_name = meta_info["init_op_name"]
self.output_names = meta_info["output_names"]
self.output_types = meta_info["output_types"]
self.table_init_op = meta_info["table_init_op"]
if validation_dataset is not None:
self.val_rdd = validation_dataset.as_graph_rdd(batch_per_shard, False).cache()
meta_info = self.val_rdd.map(lambda x: x[1]).first()
self.val_init_op_name = meta_info["init_op_name"]
self.val_output_names = meta_info["output_names"]
self.val_output_types = meta_info["output_types"]
else:
self.val_rdd = None
self.val_init_op_name = None
self.val_output_names = None
self.val_output_types = None
super().__init__(tensor_structure, batch_size=batch_size,
batch_per_thread=batch_per_thread,
hard_code_batch_size=False)
self.shard_index_op_name = None
self.validation_dataset = validation_dataset
def _get_prediction_data(self):
assert not self.drop_remainder, \
"sanity check: drop_remainder should be false in this case," \
" otherwise please report a bug"
jvalue = callZooFunc("float", "createMiniBatchRDDFromTFDataset",
self.rdd.map(lambda x: x[0]), self.init_op_name, self.table_init_op,
self.output_names, self.output_types, self.shard_index_op_name)
rdd = jvalue.value().toJavaRDD()
return rdd
def _get_evaluation_data(self):
jvalue = callZooFunc("float", "createMiniBatchRDDFromTFDatasetEval",
self.rdd.map(lambda x: x[0]), self.init_op_name, self.table_init_op,
self.output_names,
self.output_types, self.shard_index_op_name)
rdd = jvalue.value().toJavaRDD()
return rdd
def _get_training_data(self):
jvalue = callZooFunc("float", "createTFDataFeatureSet",
self.rdd.map(lambda x: x[0]), self.init_op_name, self.table_init_op,
self.output_names, self.output_types, self.shard_index_op_name,
self.inter_threads, self.intra_threads)
return FeatureSet(jvalue=jvalue)
def _get_validation_data(self):
if self.validation_dataset is not None:
jvalue = callZooFunc("float", "createTFDataFeatureSet",
self.val_rdd.map(lambda x: x[0]), self.init_op_name,
self.table_init_op, self.output_names,
self.output_types, self.shard_index_op_name,
self.inter_threads, self.intra_threads)
return FeatureSet(jvalue=jvalue)
return None
def get_num_partitions(self):
return self.rdd.getNumPartitions()
class Dataset(object):
"""
Represents a distributed set of elements backed by an RDD,
which is created by applying tensorflow dataset transformations
on each partitions.
"""
def __init__(self, xshards, create_dataset_fn):
self.xshards = xshards
self.create_dataset_fn = create_dataset_fn
def as_graph_rdd(self, batch_per_shard, drop_remainder=True):
create_dataset_fn = self.create_dataset_fn
def to_dataset(iter):
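            # Build one tf.data pipeline per partition and return its serialized
            # GraphDef together with the op/tensor names needed to drive it later.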
data_list = list(iter)
import tensorflow as tf
if not data_list:
return []
datasets = [create_dataset_fn(data) for data in data_list]
from functools import reduce
dataset = reduce(lambda x, y: x.concatenate(y), datasets)
dataset = dataset.batch(batch_per_shard, drop_remainder)
iterator = dataset.make_initializable_iterator()
train_next_ops = nest.flatten(iterator.get_next())
output_types = [t for t in nest.flatten(dataset.output_types)]
output_types_enum = [t.as_datatype_enum for t in output_types]
init_op_name = iterator.initializer.name
table_init_op = tf.tables_initializer().name
output_names = [op.name for op in train_next_ops]
graph = train_next_ops[0].graph
flatten_shapes = nest.flatten(dataset.output_shapes)
flatten_shapes = [shape[1:] for shape in flatten_shapes]
flatten_tensor_structure = [TensorMeta(dtype=output_types[i],
shape=list(flatten_shapes[i]),
name="zoo_input_{}".format(i))
for i in range(len(flatten_shapes))]
structure = dataset.output_types
if isinstance(structure, tf.DType):
structure = (structure,)
tensor_structure = nest.pack_sequence_as(structure,
flatten_tensor_structure)
meta_info = {
"init_op_name": init_op_name,
"table_init_op": table_init_op,
"output_names": output_names,
"output_types": output_types_enum,
"tensor_structure": tensor_structure
}
return [(bytearray(graph.as_graph_def().SerializeToString()), meta_info)]
graph_rdd_and_meta = self.xshards.rdd.mapPartitions(to_dataset)
return graph_rdd_and_meta
@staticmethod
def from_tensor_slices(xshards):
return TensorSliceDataset(xshards)
def map(self, map_func):
return MapDataset(self, map_func)
class TensorSliceDataset(Dataset):
def __init__(self, xshards):
assert isinstance(xshards, SparkXShards), \
"only datasets backed by a SparkXShards are supported"
self.xshards = xshards
def create_dataset_fn(data):
return tf.data.Dataset.from_tensor_slices(data)
super().__init__(xshards, create_dataset_fn)
class MapDataset(Dataset):
def __init__(self, input_dataset, map_func):
create_pre_dataset_fn = input_dataset.create_dataset_fn
def create_dataset_fn(data):
dataset = create_pre_dataset_fn(data)
return dataset.map(map_func)
super().__init__(xshards=input_dataset.xshards,
create_dataset_fn=create_dataset_fn)
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/orca/data/tf/data.py
|
Python
|
apache-2.0
| 8,821
|
[
"ORCA"
] |
4ccd248479f8c2e0f511fb12e9fc238ff327f3c710ca8319f52e1cd4da9b7cbc
|
""" FileManager for ... ?
"""
__RCSID__ = "$Id$"
import os
import datetime
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManagerBase import FileManagerBase
from DIRAC.Core.Utilities.List import stringListToString, \
intListToString, \
breakListIntoChunks
# The logic of some methods is basically a copy/paste from the FileManager class,
# so I could have inherited from it. However, I did not want to depend on it
class FileManagerPs(FileManagerBase):
def __init__(self, database=None):
super(FileManagerPs, self).__init__(database)
######################################################
#
# The all important _findFiles and _getDirectoryFiles methods
#
def _findFiles(self, lfns, metadata=['FileID'], allStatus=False, connection=False):
""" Returns the information for the given lfns
The logic works nicely in the FileManager, so I pretty much copied it.
:param lfns: list of lfns
:param metadata: list of params that we want to get for each lfn
:param allStatus: consider all file status or only those defined in db.visibleFileStatus
    :returns: successful/failed convention; successful is a dict < lfn : dict of metadata >
"""
connection = self._getConnection(connection)
dirDict = self._getFileDirectories(lfns)
result = self.db.dtree.findDirs(dirDict.keys())
if not result['OK']:
return result
directoryIDs = result['Value']
failed = {}
successful = {}
for dirPath in directoryIDs:
fileNames = dirDict[dirPath]
res = self._getDirectoryFiles(directoryIDs[dirPath], fileNames, metadata,
allStatus=allStatus, connection=connection)
for fileName, fileDict in res.get('Value', {}).items():
fname = os.path.join(dirPath, fileName)
successful[fname] = fileDict
    # The lfns that are neither in successful nor in failed do not exist
for failedLfn in (set(lfns) - set(successful)):
failed.setdefault(failedLfn, "No such file or directory")
return S_OK({"Successful": successful, "Failed": failed})
def _findFileIDs(self, lfns, connection=False):
""" Find lfn <-> FileID correspondence
"""
connection = self._getConnection(connection)
failed = {}
successful = {}
# If there is only one lfn, we might as well make a direct query
if len(lfns) == 1:
lfn = list(lfns)[0] # if lfns is a dict, list(lfns) returns lfns.keys()
pathPart, filePart = os.path.split(lfn)
result = self.db.executeStoredProcedure(
'ps_get_file_id_from_lfn', (pathPart, filePart, 'ret1'), outputIds=[2])
if not result['OK']:
return result
fileId = result['Value'][0]
if not fileId:
failed[lfn] = "No such file or directory"
else:
successful[lfn] = fileId
else:
# We separate the files by directory
filesInDirDict = self._getFileDirectories(lfns)
# We get the directory ids
result = self.db.dtree.findDirs(filesInDirDict.keys())
if not result['OK']:
return result
directoryPathToIds = result['Value']
# For each directory, we get the file ids of the files we want
for dirPath in directoryPathToIds:
fileNames = filesInDirDict[dirPath]
dirID = directoryPathToIds[dirPath]
formatedFileNames = stringListToString(fileNames)
result = self.db.executeStoredProcedureWithCursor(
'ps_get_file_ids_from_dir_id', (dirID, formatedFileNames))
if not result['OK']:
return result
for fileID, fileName in result['Value']:
fname = os.path.join(dirPath, fileName)
successful[fname] = fileID
# The lfns that are not in successful don't exist
for failedLfn in (set(lfns) - set(successful)):
failed[failedLfn] = "No such file or directory"
return S_OK({"Successful": successful, "Failed": failed})
def _getDirectoryFiles(self, dirID, fileNames, metadata_input, allStatus=False, connection=False):
""" For a given directory, and eventually given file, returns all the desired metadata
:param int dirID: directory ID
:param fileNames: the list of filenames, or []
:param metadata_input: list of desired metadata.
It can be anything from (FileName, DirID, FileID, Size, UID, Owner,
GID, OwnerGroup, Status, GUID, Checksum, ChecksumType, Type, CreationDate, ModificationDate, Mode)
:param bool allStatus: if False, only displays the files whose status is in db.visibleFileStatus
:returns: S_OK(files), where files is a dictionary indexed on filename, and values are dictionary of metadata
"""
connection = self._getConnection(connection)
metadata = list(metadata_input)
if "UID" in metadata:
metadata.append("Owner")
if "GID" in metadata:
metadata.append("OwnerGroup")
if "FileID" not in metadata:
metadata.append("FileID")
# Format the filenames and status to be used in an IN clause in the stored procedure
formatedFileNames = stringListToString(fileNames)
fStatus = stringListToString(self.db.visibleFileStatus)
specificFiles = bool(fileNames)
result = self.db.executeStoredProcedureWithCursor('ps_get_all_info_for_files_in_dir',
(dirID, specificFiles,
formatedFileNames, allStatus, fStatus))
if not result['OK']:
return result
fieldNames = ["FileName", "DirID", "FileID", "Size", "UID", "Owner",
"GID", "OwnerGroup", "Status", "GUID", "Checksum",
"ChecksumType", "Type", "CreationDate", "ModificationDate", "Mode"]
rows = result['Value']
files = {}
for row in rows:
rowDict = dict(zip(fieldNames, row))
fileName = rowDict['FileName']
# Returns only the required metadata
files[fileName] = dict((key, rowDict.get(key, "Unknown metadata field")) for key in metadata)
return S_OK(files)
def _getFileMetadataByID(self, fileIDs, connection=False):
""" Get standard file metadata for a list of files specified by FileID
:param fileIDs : list of file IDs
:returns: S_OK(files), where files is a dictionary indexed on fileID
and the values dictionaries containing the following info:
["FileID", "Size", "UID", "GID", "s.Status", "GUID", "CreationDate"]
"""
# Format the file IDs to be used in an IN clause in the stored procedure
formatedFileIds = intListToString(fileIDs)
result = self.db.executeStoredProcedureWithCursor(
'ps_get_all_info_for_file_ids', (formatedFileIds, ))
if not result['OK']:
return result
rows = result['Value']
fieldNames = ["FileID", "Size", "UID", "GID", "s.Status", "GUID", "CreationDate"]
resultDict = {}
for row in rows:
rowDict = dict(zip(fieldNames, row))
rowDict["Size"] = int(rowDict["Size"])
rowDict["UID"] = int(rowDict["UID"])
rowDict["GID"] = int(rowDict["GID"])
resultDict[rowDict["FileID"]] = rowDict
return S_OK(resultDict)
def __insertMultipleFiles(self, allFileValues, wantedLfns):
""" Insert multiple files in one query. However, if there is a problem
with one file, the whole query is rolled back.
:param allFileValues : dictionary of tuples with the information for possibly more
files than we want to insert
:param wantedLfns : list of lfn that we want to insert
"""
fileValuesStrings = []
fileDescStrings = []
for lfn in wantedLfns:
dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode = allFileValues[
lfn]
utcNow = datetime.datetime.utcnow().replace(microsecond=0)
fileValuesStrings.append("(%s, %s, %s, %s, %s, '%s', '%s', '%s', '%s', '%s', '%s', %s)" % (
dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, utcNow, utcNow, mode))
fileDescStrings.append("(DirID = %s AND FileName = '%s')" % (dirID, fileName))
fileValuesStr = ",".join(fileValuesStrings)
fileDescStr = " OR ".join(fileDescStrings)
result = self.db.executeStoredProcedureWithCursor(
'ps_insert_multiple_file', (fileValuesStr, fileDescStr))
return result
def __chunks(self, l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i + n]
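# For example: list(self.__chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]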
def _insertFiles(self, lfns, uid, gid, connection=False):
""" Insert new files. lfns is a dictionary indexed on lfn, the values are
mandatory: DirID, Size, Checksum, GUID
optional : Owner (dict with username and group), ChecksumType (Adler32 by default), Mode (db.umask by default)
:param lfns : lfns and info to insert
:param uid : user id, overwritten by Owner['username'] if defined
:param gid : group id, overwritten by Owner['group'] if defined
"""
connection = self._getConnection(connection)
failed = {}
successful = {}
res = self._getStatusInt('AprioriGood', connection=connection)
if res['OK']:
statusID = res['Value']
else:
return res
lfnsToRetry = []
fileValues = {}
fileDesc = {}
# Prepare each file separately
for lfn in lfns:
# Get all the info
fileInfo = lfns[lfn]
dirID = fileInfo['DirID']
fileName = os.path.basename(lfn)
size = fileInfo['Size']
ownerDict = fileInfo.get('Owner', None)
checksum = fileInfo['Checksum']
checksumtype = fileInfo.get('ChecksumType', 'Adler32')
guid = fileInfo['GUID']
mode = fileInfo.get('Mode', self.db.umask)
s_uid = uid
s_gid = gid
# overwrite the s_uid and s_gid if defined in the lfn info
if ownerDict:
result = self.db.ugManager.getUserAndGroupID(ownerDict)
if result['OK']:
s_uid, s_gid = result['Value']
fileValues[lfn] = (dirID, size, s_uid, s_gid,
statusID, fileName, guid,
checksum, checksumtype, mode)
fileDesc[(dirID, fileName)] = lfn
chunkSize = 200
allChunks = list(self.__chunks(lfns.keys(), chunkSize))
for lfnChunk in allChunks:
result = self.__insertMultipleFiles(fileValues, lfnChunk)
if result['OK']:
allIds = result['Value']
for dirId, fileName, fileID in allIds:
lfn = fileDesc[(dirId, fileName)]
successful[lfn] = lfns[lfn]
successful[lfn]['FileID'] = fileID
else:
lfnsToRetry.extend(lfnChunk)
# If we are here, the multiple insert failed for some chunks, so we retry those files one by one
for lfn in lfnsToRetry:
dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode = fileValues[
lfn]
# insert
result = self.db.executeStoredProcedureWithCursor(
'ps_insert_file',
(dirID,
size,
s_uid,
s_gid,
statusID,
fileName,
guid,
checksum,
checksumtype,
mode))
if not result['OK']:
failed[lfn] = result['Message']
else:
fileID = result['Value'][0][0]
successful[lfn] = lfns[lfn]
successful[lfn]['FileID'] = fileID
return S_OK({'Successful': successful, 'Failed': failed})
def _getFileIDFromGUID(self, guids, connection=False):
""" Returns the file ids from list of guids
:param guids : list of guid
:returns dictionary < guid : fileId >
"""
connection = self._getConnection(connection)
if not guids:
return S_OK({})
if not isinstance(guids, (list, tuple)):
guids = [guids]
# formatedGuids = ','.join( [ '"%s"' % guid for guid in guids ] )
formatedGuids = stringListToString(guids)
result = self.db.executeStoredProcedureWithCursor(
'ps_get_file_ids_from_guids', (formatedGuids, ))
if not result['OK']:
return result
guidDict = dict((guid, fileID) for guid, fileID in result['Value'])
return S_OK(guidDict)
def getLFNForGUID(self, guids, connection=False):
""" Returns the lfns matching given guids"""
connection = self._getConnection(connection)
if not guids:
return S_OK({})
if not isinstance(guids, (list, tuple)):
guids = [guids]
formatedGuids = stringListToString(guids)
result = self.db.executeStoredProcedureWithCursor('ps_get_lfns_from_guids', (formatedGuids, ))
if not result['OK']:
return result
guidDict = dict((guid, lfn) for guid, lfn in result['Value'])
failedGuid = set(guids) - set(guidDict)
failed = dict.fromkeys(failedGuid, "GUID does not exist") if failedGuid else {}
return S_OK({"Successful": guidDict, "Failed": failed})
######################################################
#
# _deleteFiles related methods
#
def _deleteFiles(self, fileIDs, connection=False):
""" Delete a list of files and the associated replicas
:param fileIDs : list of file IDs
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
replicaPurge = self.__deleteFileReplicas(fileIDs)
filePurge = self.__deleteFiles(fileIDs, connection=connection)
if not replicaPurge['OK']:
return replicaPurge
if not filePurge['OK']:
return filePurge
return S_OK()
def __deleteFileReplicas(self, fileIDs, connection=False):
""" Delete all the replicas from the file ids
:param fileIDs: list of file ids
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
if not fileIDs:
return S_OK()
formatedFileIds = intListToString(fileIDs)
result = self.db.executeStoredProcedureWithCursor(
'ps_delete_replicas_from_file_ids', (formatedFileIds, ))
if not result['OK']:
return result
errno, msg = result['Value'][0]
if errno:
return S_ERROR(msg)
return S_OK()
def __deleteFiles(self, fileIDs, connection=False):
""" Delete the files from their ids
:param fileIDs: list of file ids
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
formatedFileIds = intListToString(fileIDs)
result = self.db.executeStoredProcedureWithCursor('ps_delete_files', (formatedFileIds, ))
if not result['OK']:
return result
errno, msg = result['Value'][0]
if errno:
return S_ERROR(msg)
return S_OK()
def __insertMultipleReplicas(self, allReplicaValues, lfnsChunk):
""" Insert multiple replicas in one query. However, if there is a problem
with one replica, the whole query is rolled back.
:param allReplicaValues : dictionary of tuples with the information for possibly more
replicas than we want to insert
:param lfnsChunk : list of lfn that we want to insert
"""
repValuesStrings = []
repDescStrings = []
for lfn in lfnsChunk:
fileID, seID, statusID, replicaType, pfn = allReplicaValues[lfn]
utcNow = datetime.datetime.utcnow().replace(microsecond=0)
repValuesStrings.append("(%s,%s,'%s','%s','%s','%s','%s')" %
(fileID, seID, statusID, replicaType, utcNow, utcNow, pfn))
repDescStrings.append("(r.FileID = %s AND SEID = %s)" % (fileID, seID))
repValuesStr = ",".join(repValuesStrings)
repDescStr = " OR ".join(repDescStrings)
result = self.db.executeStoredProcedureWithCursor(
'ps_insert_multiple_replica', (repValuesStr, repDescStr))
return result
def _insertReplicas(self, lfns, master=False, connection=False):
""" Insert new replicas. lfns is a dictionary with one entry for each file. The keys are lfns, and values are dict
with mandatory attributes : FileID, SE (the name), PFN
:param lfns: lfns and info to insert
:param master: true if they are master replica, otherwise they will be just 'Replica'
:return: successful/failed convention, with successful[lfn] = true
"""
chunkSize = 200
connection = self._getConnection(connection)
# Add the files
failed = {}
successful = {}
# Get the status id of AprioriGood
res = self._getStatusInt('AprioriGood', connection=connection)
if not res['OK']:
return res
statusID = res['Value']
lfnsToRetry = []
repValues = {}
repDesc = {}
# treat each file after each other
for lfn in lfns.keys():
fileID = lfns[lfn]['FileID']
seName = lfns[lfn]['SE']
if isinstance(seName, basestring):
seList = [seName]
elif isinstance(seName, list):
seList = seName
else:
return S_ERROR('Illegal type of SE list: %s' % str(type(seName)))
replicaType = 'Master' if master else 'Replica'
pfn = lfns[lfn]['PFN']
# treat each replica of a file after the other
# (THIS CANNOT WORK... WE ARE ONLY CAPABLE OF DOING ONE REPLICA PER FILE AT A TIME)
for seName in seList:
# get the SE id
res = self.db.seManager.findSE(seName)
if not res['OK']:
failed[lfn] = res['Message']
continue
seID = res['Value']
# This is incompatible with adding multiple replicas at a time for a given file
repValues[lfn] = (fileID, seID, statusID, replicaType, pfn)
repDesc[(fileID, seID)] = lfn
allChunks = list(self.__chunks(lfns.keys(), chunkSize))
for lfnChunk in allChunks:
result = self.__insertMultipleReplicas(repValues, lfnChunk)
if result['OK']:
allIds = result['Value']
for fileId, seId, repId in allIds:
lfn = repDesc[(fileId, seId)]
successful[lfn] = True
lfns[lfn]['RepID'] = repId
else:
lfnsToRetry.extend(lfnChunk)
for lfn in lfnsToRetry:
fileID, seID, statusID, replicaType, pfn = repValues[lfn]
# insert the replica and its info
result = self.db.executeStoredProcedureWithCursor('ps_insert_replica',
(fileID, seID, statusID, replicaType, pfn))
if not result['OK']:
failed[lfn] = result['Message']
else:
replicaID = result['Value'][0][0]
lfns[lfn]['RepID'] = replicaID
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def _getRepIDsForReplica(self, replicaTuples, connection=False):
""" Get the Replica IDs for (fileId, SEID) couples
:param replicaTuples : list of (fileID, seID) couples
:returns { fileID : { seID : RepID } }
"""
connection = self._getConnection(connection)
replicaDict = {}
for fileID, seID in replicaTuples:
result = self.db.executeStoredProcedure(
'ps_get_replica_id', (fileID, seID, 'repIdOut'), outputIds=[2])
if not result['OK']:
return result
repID = result['Value'][0]
# if the replica exists, we add it to the dict
if repID:
replicaDict.setdefault(fileID, {}).setdefault(seID, repID)
return S_OK(replicaDict)
######################################################
#
# _deleteReplicas related methods
#
def _deleteReplicas(self, lfns, connection=False):
""" Deletes replicas. The deletion of replicas that do not exist is successful
:param lfns : dictionary with lfns as keys; each value is a dict with a mandatory "SE" key,
corresponding to the SE name or SE ID
:returns: successful/failed convention, with successful[lfn] = True
"""
connection = self._getConnection(connection)
failed = {}
successful = {}
# First we get the fileIds from our lfns
res = self._findFiles(lfns.keys(), ['FileID'], connection=connection)
if not res['OK']:
return res
# If the file does not exist we consider the deletion successful
for lfn, error in res['Value']['Failed'].items():
if error == 'No such file or directory':
successful[lfn] = True
else:
failed[lfn] = error
lfnFileIDDict = res['Value']['Successful']
for lfn, fileDict in lfnFileIDDict.items():
fileID = fileDict['FileID']
# Then we get our StorageElement Id (cached in seManager)
se = lfns[lfn]['SE']
# if se is already the se id, findSE will return it
res = self.db.seManager.findSE(se)
if not res['OK']:
return res
seID = res['Value']
# Finally remove the replica
result = self.db.executeStoredProcedureWithCursor(
'ps_delete_replica_from_file_and_se_ids', (fileID, seID))
if not result['OK']:
failed[lfn] = result['Message']
continue
errno, errMsg = result['Value'][0]
if errno:
failed[lfn] = errMsg
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# _setReplicaStatus _setReplicaHost _setReplicaParameter methods
# _setFileParameter method
#
def _setReplicaStatus(self, fileID, se, status, connection=False):
""" Set the status of a replica
:param fileID : file id
:param se : se name or se id
:param status : status to be applied
:returns: S_OK() or S_ERROR(msg)
"""
if status not in self.db.validReplicaStatus:
return S_ERROR('Invalid replica status %s' % status)
connection = self._getConnection(connection)
res = self._getStatusInt(status, connection=connection)
if not res['OK']:
return res
statusID = res['Value']
# Then we get our StorageElement Id (cached in seManager)
res = self.db.seManager.findSE(se)
if not res['OK']:
return res
seID = res['Value']
result = self.db.executeStoredProcedureWithCursor(
'ps_set_replica_status', (fileID, seID, statusID))
if not result['OK']:
return result
affected = result['Value'][0][0]  # affected is the number of rows updated
if not affected:
return S_ERROR("Replica does not exist")
return S_OK()
def _setReplicaHost(self, fileID, se, newSE, connection=False):
""" Move a replica from one SE to another (I don't think this should be called
:param fileID : file id
:param se : se name or se id of the previous se
:param newSE : se name or se id of the new se
:returns: S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
# Get the new se id
res = self.db.seManager.findSE(newSE)
if not res['OK']:
return res
newSEID = res['Value']
# Get the old se id
res = self.db.seManager.findSE(se)
if not res['OK']:
return res
oldSEID = res['Value']
# update
result = self.db.executeStoredProcedureWithCursor(
'ps_set_replica_host', (fileID, oldSEID, newSEID))
if not result['OK']:
return result
affected = result['Value'][0][0]
if not affected:
return S_ERROR("Replica does not exist")
else:
return S_OK()
def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
""" Generic method to set a file parameter
:param fileID : id of the file
:param paramName : the file parameter you want to change
It should be one of [ UID, GID, Status, Mode]. However, in case of
unexpected parameter, and to stay compatible with the other Manager,
there is a manual request done.
:param paramValue : the value (raw, or id) to insert
:returns: S_OK() or S_ERROR
"""
connection = self._getConnection(connection)
# The PS associated with a given parameter
psNames = {'UID': 'ps_set_file_uid',
'GID': 'ps_set_file_gid',
'Status': 'ps_set_file_status',
'Mode': 'ps_set_file_mode'}
psName = psNames.get(paramName, None)
# If there is an associated procedure, we go for it
if psName:
result = self.db.executeStoredProcedureWithCursor(psName, (fileID, paramValue))
if not result['OK']:
return result
_affected = result['Value'][0][0]
# If affected = 0, the file does not exist, but who cares...
# In case this is a 'new' parameter, we have a fallback solution, but we
# should add a specific ps for it
else:
req = "UPDATE FC_Files SET %s='%s', ModificationDate=UTC_TIMESTAMP() WHERE FileID IN (%s)"\
% (paramName, paramValue, intListToString(fileID))
return self.db._update(req, connection)
return S_OK()
######################################################
#
# _getFileReplicas related methods
#
def _getFileReplicas(self, fileIDs, fields_input=['PFN'], allStatus=False, connection=False):
""" Get replicas for the given list of files specified by their fileIDs
:param fileIDs : list of file ids
:param fields_input : metadata of the Replicas we are interested in
:param allStatus : if True, all the Replica statuses will be considered,
otherwise, only the db.visibleReplicaStatus
:returns S_OK with a dict { fileID : { SE name : dict of metadata } }
"""
connection = self._getConnection(connection)
fields = list(fields_input)
if 'Status' not in fields:
fields.append('Status')
replicas = {}
# Format the status to be used in an IN clause in the stored procedure
fStatus = stringListToString(self.db.visibleReplicaStatus)
fieldNames = ["FileID", "SE", "Status", "RepType", "CreationDate", "ModificationDate", "PFN"]
for fileID in fileIDs:
result = self.db.executeStoredProcedureWithCursor('ps_get_all_info_of_replicas',
(fileID, allStatus, fStatus))
if not result['OK']:
return result
rows = result['Value']
if not rows:
replicas[fileID] = {}
for row in rows:
rowDict = dict(zip(fieldNames, row))
# Returns only the required metadata
se = rowDict["SE"]
repForFile = replicas.setdefault(fileID, {})
repForFile[se] = dict((key, rowDict.get(key, "Unknown metadata field")) for key in fields)
return S_OK(replicas)
def countFilesInDir(self, dirId):
""" Count how many files there is in a given Directory
:param dirID: directory id
:returns: S_OK(value) or S_ERROR
"""
result = self.db.executeStoredProcedure('ps_count_files_in_dir', (dirId, 'ret1'), outputIds=[1])
if not result['OK']:
return result
res = S_OK(result['Value'][0])
return res
##########################################################################
#
# We override some methods from the base class because of the new DB constraints or for perf reasons
#
# Some methods could be inherited in the future if we have perf problems. For example
# * setFileGroup
# * setFileOwner
# * setFileMode
# * changePath*
#
##########################################################################
def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
""" This updates the directory usage, but is now done by triggers in the DB"""
return S_OK()
def _computeStorageUsageOnRemoveFile(self, lfns, connection=False):
"""Again nothing to compute, all done by the triggers"""
directorySESizeDict = {}
return S_OK(directorySESizeDict)
# "REMARQUE : THIS IS STILL TRUE, BUT YOU MIGHT WANT TO CHECK FOR A GIVEN GUID ANYWAY
# def _checkUniqueGUID( self, lfns, connection = False ):
# """ The GUID unicity is ensured at the DB level, so we will have similar message if the insertion fails"""
#
# failed = {}
# return failed
def getDirectoryReplicas(self, dirID, path, allStatus=False, connection=False):
"""
This is defined in the FileManagerBase but it relies on the SEManager to get the SE names.
It is good practice in software, but since the SE and Replica tables are bound together in the DB,
I might as well resolve the name in the query
Get the replicas for all the Files in the given Directory
:param int dirID: ID of the directory
:param unused path: useless
:param bool allStatus: whether all replicas and file status are considered
If False, take the visibleFileStatus and visibleReplicaStatus
values from the configuration
"""
# We format the visible file/replica status so we can give it as an argument to the ps
# It is used in an IN clause, so it looks like --'"AprioriGood","Trash"'--
# fStatus = ','.join( [ '"%s"' % status for status in self.db.visibleFileStatus ] )
# rStatus = ','.join( [ '"%s"' % status for status in self.db.visibleReplicaStatus ] )
fStatus = stringListToString(self.db.visibleFileStatus)
rStatus = stringListToString(self.db.visibleReplicaStatus)
result = self.db.executeStoredProcedureWithCursor(
'ps_get_replicas_for_files_in_dir', (dirID, allStatus, fStatus, rStatus))
if not result['OK']:
return result
resultDict = {}
for fileName, _fileID, seName, pfn in result['Value']:
resultDict.setdefault(fileName, {}).setdefault(seName, []).append(pfn)
return S_OK(resultDict)
def _getFileLFNs(self, fileIDs):
""" Get the file LFNs for a given list of file IDs
We need to override this method because the base class hard codes the column names
"""
successful = {}
for chunks in breakListIntoChunks(fileIDs, 1000):
# Format the file IDs to be used in an IN clause in the stored procedure
formatedFileIds = intListToString(chunks)
result = self.db.executeStoredProcedureWithCursor(
'ps_get_full_lfn_for_file_ids', (formatedFileIds, ))
if not result['OK']:
return result
# The result contains FileID, LFN
for row in result['Value']:
successful[row[0]] = row[1]
missingIds = set(fileIDs) - set(successful)
failed = dict.fromkeys(missingIds, "File ID not found")
return S_OK({'Successful': successful, 'Failed': failed})
def getSEDump(self, seName):
"""
Return all the files at a given SE, together with checksum and size
:param seName: name of the StorageElement
:returns: S_OK with list of tuples (lfn, checksum, size)
"""
res = self.db.seManager.findSE(seName)
if not res['OK']:
return res
seID = res['Value']
return self.db.executeStoredProcedureWithCursor('ps_get_se_dump', (seID,))
|
chaen/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/WithFkAndPs/FileManagerPs.py
|
Python
|
gpl-3.0
| 30,871
|
[
"DIRAC"
] |
0bab1d0f12a1fed1f302a46b5f0247bb2bfbc1ac565443524c75e356cba8e431
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Apr 17, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 17, 2012"
import unittest
import os
from pymatgen import Molecule
from pymatgen.io.gaussianio import GaussianInput, GaussianOutput
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules")
class GaussianInputTest(unittest.TestCase):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.coords = coords
mol = Molecule(["C", "H", "H", "H", "H"], coords)
self.gau = GaussianInput(
mol, route_parameters={'SP': "", "SCF": "Tight"},
input_parameters={"EPS": 12})
def test_init(self):
mol = Molecule(["C", "H", "H", "H", "H"], self.coords)
gau = GaussianInput(mol, charge=1, route_parameters={'SP': "",
"SCF": "Tight"})
self.assertEqual(gau.spin_multiplicity, 2)
mol = Molecule(["C", "H", "H", "H", "H"], self.coords, charge=-1)
gau = GaussianInput(mol, route_parameters={'SP': "", "SCF": "Tight"})
self.assertEqual(gau.spin_multiplicity, 2)
self.assertRaises(ValueError, GaussianInput, mol, spin_multiplicity=1)
def test_str_and_from_string(self):
ans = """#P HF/6-31G(d) SCF=Tight SP Test
H4 C1
0 1
C
H 1 B1
H 1 B2 2 A2
H 1 B3 2 A3 3 D3
H 1 B4 2 A4 4 D4
B1=1.089000
B2=1.089000
A2=109.471221
B3=1.089000
A3=109.471213
D3=120.000017
B4=1.089000
A4=109.471213
D4=119.999966
EPS=12
"""
self.assertEqual(str(self.gau), ans)
gau = GaussianInput.from_string(ans)
self.assertEqual(gau.functional, 'HF')
self.assertEqual(gau.input_parameters['EPS'], '12')
def test_from_file(self):
filepath = os.path.join(test_dir, 'MethylPyrrolidine_drawn.gjf')
gau = GaussianInput.from_file(filepath)
self.assertEqual(gau.molecule.composition.formula, "H11 C5 N1")
self.assertIn("opt", gau.route_parameters)
self.assertEqual(gau.route_parameters["geom"], "connectivity")
self.assertEqual(gau.functional, "b3lyp")
self.assertEqual(gau.basis_set, "6-311+g(d,p)")
filepath = os.path.join(test_dir, "g305_hb.txt")
with open(filepath) as f:
txt = f.read()
toks = txt.split("--link1--")
for i, t in enumerate(toks):
lines = t.strip().split("\n")
lines = [l.strip() for l in lines]
gau = GaussianInput.from_string("\n".join(lines))
self.assertIsNotNone(gau.molecule)
if i == 0:
mol = gau.molecule
ans = """Molecule Summary (H4 O2)
Reduced Formula: H2O
Charge = 0, Spin Mult = 1
Sites (6)
1 O 0.000000 0.000000 0.000000
2 O 0.000000 0.000000 2.912902
3 H 0.892596 0.000000 -0.373266
4 H 0.143970 0.000219 0.964351
5 H -0.582554 0.765401 3.042783
6 H -0.580711 -0.766761 3.043012"""
self.assertEqual(str(mol), ans)
def test_from_string(self):
gau_str = """%mem=5000000
%chk=filename
# mp2/6-31g* scf=direct
SIH4+ H2---SIH2+ CS //MP2(full)/6-31G* MP2=-290.9225259
1,2
Si
X,1,1.
H,1,R1,2,HALF1
H,1,R1,2,HALF1,3,180.,0
X,1,1.,2,90.,3,90.,0
X,1,1.,5,THETA,2,180.,0
H,1,R3,6,HALF3,5,0.,0
H,1,R4,6,HALF3,7,180.,0
R1=1.47014
R3=1.890457
R4=1.83514
HALF1=60.633314
THETA=10.35464
HALF3=11.861807"""
gau = GaussianInput.from_string(gau_str)
self.assertEqual("X3SiH4", gau.molecule.composition.reduced_formula)
class GaussianOutputTest(unittest.TestCase):
# todo: Add unittest for PCM type output.
def setUp(self):
self.gauout = GaussianOutput(os.path.join(test_dir, "methane.log"))
def test_props(self):
gau = self.gauout
self.assertEqual(len(gau.energies), 3)
self.assertAlmostEqual(gau.energies[-1], -39.9768775602)
self.assertEqual(len(gau.structures), 4)
for mol in gau.structures:
self.assertEqual(mol.formula, 'H4 C1')
self.assertIn("OPT", gau.route)
self.assertEqual("Minimum", gau.stationary_type)
self.assertEqual("HF", gau.functional)
self.assertEqual("3-21G", gau.basis_set)
self.assertEqual(17, gau.num_basis_func)
d = gau.as_dict()
self.assertEqual(d["input"]["functional"], "HF")
self.assertAlmostEqual(d["output"]["final_energy"], -39.9768775602)
if __name__ == "__main__":
unittest.main()
|
yanikou19/pymatgen
|
pymatgen/io/tests/test_gaussianio.py
|
Python
|
mit
| 5,056
|
[
"pymatgen"
] |
acaf40c32dedc7871f92e53b503bf6bd7ceb3cd4fec9f2461c99ca3491432b36
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2017-10-25
# @Filename: base.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: Brian Cherinka
# @Last modified time: 2018-07-25 18:32:35
from __future__ import absolute_import, division, print_function
import copy as copy_mod
import os
import astropy.table as table
from astropy import units as u
from marvin.core.exceptions import MarvinError
from marvin.utils.datamodel import DataModelList
from marvin.utils.general.structs import FuzzyList
class DRPCubeDataModel(object):
"""A class representing a DRP Cube datamodel.
Parameters
----------
release : str
The DRP release this datamodel describes.
datacubes : list
A list of `.DataCube` instances that describe the datacubes in this
datamodel.
spectra : list
A list of `.Spectrum` instances that describe the spectra in this
datamodel.
aliases : list
A list of aliases for this datamodel.
bitmasks : dict
A dictionary of `~marvin.utils.general.maskbit.Maskbit` objects.
qual_flag : str
The name of the quality bitmask flag. Must not include the ``MANGA_``
prefix.
"""
def __init__(self, release, datacubes=[], spectra=[], aliases=[], bitmasks=None,
qual_flag='DRP3QUAL'):
self.release = release
self.aliases = aliases
self.datacubes = DataCubeList(datacubes, parent=self)
self.spectra = SpectrumList(spectra, parent=self)
self.bitmasks = bitmasks if bitmasks is not None else {}
self.qual_flag = qual_flag
def __repr__(self):
return ('<DRPCubeDataModel release={0!r}, n_datacubes={1}, n_spectra={2}>'
.format(self.release, len(self.datacubes), len(self.spectra)))
def copy(self):
"""Returns a copy of the datamodel."""
return copy_mod.deepcopy(self)
def __eq__(self, value):
"""Uses fuzzywuzzy to return the closest property match."""
datacube_names = [datacube.name for datacube in self.datacubes]
spectrum_names = [spectrum.name for spectrum in self.spectra]
if value in datacube_names:
return self.datacubes[datacube_names.index(value)]
elif value in spectrum_names:
return self.spectra[spectrum_names.index(value)]
try:
datacube_best_match = self.datacubes[value]
except ValueError:
datacube_best_match = None
try:
spectrum_best_match = self.spectra[value]
except ValueError:
spectrum_best_match = None
if ((datacube_best_match is None and spectrum_best_match is None) or
(datacube_best_match is not None and spectrum_best_match is not None)):
raise ValueError('too ambiguous input {!r}'.format(value))
elif datacube_best_match is not None:
return datacube_best_match
elif spectrum_best_match is not None:
return spectrum_best_match
def __contains__(self, value):
try:
match = self.__eq__(value)
if match is None:
return False
else:
return True
except ValueError:
return False
def __getitem__(self, value):
return self == value
def to_rss(self):
"""Returns a copy with `.RSSDatamodel` objects instead of datacubes."""
if isinstance(self, DRPRSSDataModel):
raise ValueError('this is already a DRPRSSDataModel')
copy_of_self = self.copy()
delattr(copy_of_self, 'datacubes')
# Changes the type and converts datacubes to RSSDatamodel
copy_of_self.__class__ = DRPRSSDataModel
copy_of_self.rss = self.datacubes.to_rss(copy_of_self)
# Resets the parent of the copied spectra
for spectrum in copy_of_self.spectra:
spectrum.parent = copy_of_self
return copy_of_self
class DRPRSSDataModel(DRPCubeDataModel):
"""A class representing a DRP RSS listdatamodel."""
def __init__(self, release, rss=[], spectra=[], aliases=[], bitmasks=None):
self.release = release
self.aliases = aliases
self.rss = RSSList(rss, parent=self)
self.spectra = SpectrumList(spectra, parent=self)
self.bitmasks = bitmasks if bitmasks is not None else {}
def __repr__(self):
return ('<DRPRSSDataModel release={0!r}, n_rss={1}, n_spectra={2}>'
.format(self.release, len(self.rss), len(self.spectra)))
class DRPCubeDataModelList(DataModelList):
"""A dictionary of DRP Cube datamodels."""
base = {'DRPCubeDataModel': DRPCubeDataModel}
def copy(self):
copy_of_self = super(DRPCubeDataModelList, self).copy()
copy_of_self.__class__ = DRPRSSDataModelList
return copy_of_self
class DRPRSSDataModelList(DataModelList):
"""A dictionary of DRP RSS datamodels."""
base = {'DRPRSSDataModel': DRPRSSDataModel}
class DataCubeList(FuzzyList):
"""Creates a list containing models and their representation."""
def __init__(self, the_list, parent=None):
self.parent = parent
super(DataCubeList, self).__init__([])
for item in the_list:
self.append(item, copy=True)
def copy(self):
"""Returns a copy of the datamodel."""
return copy_mod.deepcopy(self)
def mapper(self, value):
"""Helper method for the fuzzy list to match on the datacube name."""
return value.name
def append(self, value, copy=True):
"""Appends with copy."""
append_obj = value if copy is False else copy_mod.deepcopy(value)
append_obj.parent = self.parent
if isinstance(append_obj, DataCube):
super(DataCubeList, self).append(append_obj)
else:
raise ValueError('invalid datacube of type {!r}'.format(type(append_obj)))
def to_rss(self, new_parent):
"""Returns a copy of this list as an `.RSSList` object."""
if isinstance(self, RSSList):
raise ValueError('this is already an RSSList')
# Copies self and resets the type to RSSList
copy_of_self = self.copy()
copy_of_self.__class__ = RSSList
copy_of_self.parent = new_parent
# Replaces each datacube in itself with an RSS datamodel
for label in self.list_names():
copy_of_self.remove(copy_of_self[label])
copy_of_self.append(self[label].to_rss(new_parent))
return copy_of_self
def list_names(self):
"""Returns a list with the names of the datacubes in this list."""
return [item.name for item in self]
def to_table(self, pprint=False, description=False, max_width=1000):
"""Returns an astropy table with all the datacubes in this datamodel.
Parameters:
pprint (bool):
Whether the table should be printed to screen using astropy's
table pretty print.
description (bool):
If ``True``, an extra column with the description of the
datacube will be added.
max_width (int or None):
A keyword to pass to ``astropy.table.Table.pprint()`` with the
maximum width of the table, in characters.
Returns:
result (``astropy.table.Table``):
If ``pprint=False``, returns an astropy table containing
the name of the datacube, whether it has ``ivar`` or
``mask``, the units, and a description (if
``description=True``).
"""
datacube_table = table.Table(
None, names=['name', 'ivar', 'mask', 'unit', 'description',
'db_table', 'db_column', 'fits_extension'],
dtype=['S20', bool, bool, 'S20', 'S500', 'S20', 'S20', 'S20'])
if self.parent:
datacube_table.meta['release'] = self.parent.release
for datacube in self:
unit = datacube.unit.to_string()
datacube_table.add_row((datacube.name,
datacube.has_ivar(),
datacube.has_mask(),
unit,
datacube.description,
datacube.db_table,
datacube.db_column(),
datacube.fits_extension()))
if not description:
datacube_table.remove_column('description')
if pprint:
datacube_table.pprint(max_width=max_width, max_lines=1e6)
return
return datacube_table
def write_csv(self, filename=None, path=None, overwrite=None, **kwargs):
''' Write the datamodel to a CSV '''
release = self.parent.release.lower().replace('-', '')
if not filename:
filename = 'drpcubes_dm_{0}.csv'.format(release)
if not path:
path = os.path.join(os.getenv("MARVIN_DIR"), 'docs', 'sphinx', '_static')
fullpath = os.path.join(path, filename)
table = self.to_table(**kwargs)
table.write(fullpath, format='csv', overwrite=overwrite)
class RSSList(DataCubeList):
"""Creates a list containing RSSDatamodel and their representation."""
def append(self, value, copy=True):
"""Appends with copy."""
append_obj = value if copy is False else copy_mod.deepcopy(value)
append_obj.parent = self.parent
if isinstance(append_obj, RSS):
super(RSSList, self).append(append_obj)
else:
raise ValueError('invalid RSS of type {!r}'.format(type(append_obj)))
def write_csv(self, filename=None, path=None, overwrite=None, **kwargs):
"""Write the datamodel to a CSV"""
release = self.parent.release.lower().replace('-', '')
if not filename:
filename = 'drprss_dm_{0}.csv'.format(release)
if not path:
path = os.path.join(os.getenv('MARVIN_DIR'), 'docs', 'sphinx', '_static')
fullpath = os.path.join(path, filename)
table = self.to_table(**kwargs)
table.write(fullpath, format='csv', overwrite=overwrite)
class DataCube(object):
"""Represents a extension in the DRP logcube file.
Parameters:
name (str):
The datacube name. This is the internal name that Marvin will use
for this datacube. It is different from the ``extension_name``
parameter, which must be identical to the extension name of the
datacube in the logcube file.
extension_name (str):
The FITS extension containing this datacube.
extension_wave (str):
The FITS extension containing the wavelength for this datacube.
extension_ivar (str or None):
The extension that contains the inverse variance associated with
this datacube, if any.
extension_mask (str or None):
The extension that contains the mask associated with this
datacube, if any.
db_table (str):
The DB table in which the datacube is stored. Defaults to
``spaxel``.
db_column (str):
An alternate DB column in which the datacube is stored. If none, defaults to
the FITS extension name.
unit (astropy unit or None):
The unit for this datacube.
scale (float):
The scaling factor for the values of the datacube.
formats (dict):
A dictionary with formats that can be used to represent the
datacube. Default ones are ``latex`` and ``string``.
pixmask_flag : str
The name of the pixmask flag. Should be the full name, including the
``MANGA_`` part.
description (str):
A description for the datacube.
"""
def __init__(self, name, extension_name, extension_wave=None,
extension_ivar=None, extension_mask=None, db_table='spaxel',
db_column=None, unit=u.dimensionless_unscaled, scale=1, formats={},
pixmask_flag='MANGA_DRP3PIXMASK', description=''):
self.name = name
self._extension_name = extension_name
self._extension_wave = extension_wave
self._extension_ivar = extension_ivar
self._extension_mask = extension_mask
self.pixmask_flag = pixmask_flag
self.db_table = db_table
self._db_column = db_column
self._parent = None
self.formats = formats
self.description = description
self.unit = u.CompositeUnit(scale, unit.bases, unit.powers)
def copy(self):
return copy_mod.deepcopy(self)
def to_rss(self, new_parent):
"""Creates a copy of this datacube as a `.RSS` object."""
if isinstance(self, RSS):
raise ValueError('this is already a RSS datamodel object.')
assert isinstance(new_parent, DRPRSSDataModel)
copy_of_self = self.copy()
copy_of_self.__class__ = RSS
copy_of_self.parent = new_parent
copy_of_self.db_table = 'rssfiber'
return copy_of_self
@property
def parent(self):
"""Retrieves the parent."""
return self._parent
@parent.setter
def parent(self, value):
"""Sets the parent."""
assert isinstance(value, DRPCubeDataModel), 'parent must be a DRPCubeDataModel'
self._parent = value
def full(self):
"""Returns the name string."""
return self._extension_name.lower()
def has_ivar(self):
"""Returns True is the datacube has an ivar extension."""
return self._extension_ivar is not None
def has_mask(self):
"""Returns True is the datacube has an mask extension."""
return self._extension_mask is not None
def fits_extension(self, ext=None):
"""Returns the FITS extension name."""
assert ext is None or ext in ['ivar', 'mask'], 'invalid extension'
if ext is None:
return self._extension_name.upper()
elif ext == 'ivar':
if not self.has_ivar():
raise MarvinError('no ivar extension for datacube {0!r}'.format(self.full()))
return self._extension_ivar.upper()
elif ext == 'mask':
if not self.has_mask():
raise MarvinError('no mask extension for datacube {0!r}'.format(self.full()))
return self._extension_mask
def db_column(self, ext=None):
"""Returns the name of the DB column containing this datacube.
If ``db_column`` is passed in as input to the ``DataCube`` datamodel, returns
the given name. Otherwise returns the name of the FITS extension.
"""
if self._db_column:
return self._db_column
return self.fits_extension(ext=ext).lower()
def __repr__(self):
return '<DataCube {!r}, release={!r}, unit={!r}>'.format(
self.name, self.parent.release if self.parent else None, self.unit.to_string())
def __str__(self):
return self.full()
def to_string(self, mode='string'):
"""Return a string representation of the datacube."""
if mode == 'latex':
if mode in self.formats:
latex = self.formats[mode]
else:
latex = self.to_string()
return latex
else:
if mode in self.formats:
string = self.formats[mode]
else:
string = self.name
return string
class RSS(DataCube):
def __repr__(self):
return '<RSS {!r}, release={!r}, unit={!r}>'.format(
self.name, self.parent.release if self.parent else None,
self.unit.to_string())
@property
def parent(self):
"""Retrieves the parent."""
return self._parent
@parent.setter
def parent(self, value):
"""Sets the parent."""
assert isinstance(value, DRPRSSDataModel), 'parent must be a DRPRSSDataModel'
self._parent = value
class SpectrumList(FuzzyList):
"""Creates a list containing spectra and their representation."""
def __init__(self, the_list, parent=None):
self.parent = parent
super(SpectrumList, self).__init__([])
for item in the_list:
self.append(item, copy=True)
def mapper(self, value):
"""Helper method for the fuzzy list to match on the spectrum name."""
return value.name
def append(self, value, copy=True):
"""Appends with copy."""
append_obj = value if copy is False else copy_mod.deepcopy(value)
append_obj.parent = self.parent
if isinstance(append_obj, Spectrum):
super(SpectrumList, self).append(append_obj)
else:
raise ValueError('invalid spectrum of type {!r}'.format(type(append_obj)))
def list_names(self):
"""Returns a list with the names of the spectra in this list."""
return [item.name for item in self]
def to_table(self, pprint=False, description=False, max_width=1000):
"""Returns an astropy table with all the spectra in this datamodel.
Parameters:
pprint (bool):
Whether the table should be printed to screen using astropy's
table pretty print.
description (bool):
If ``True``, an extra column with the description of the
spectrum will be added.
max_width (int or None):
A keyword to pass to ``astropy.table.Table.pprint()`` with the
maximum width of the table, in characters.
Returns:
result (``astropy.table.Table``):
If ``pprint=False``, returns an astropy table containing
the name of the spectrum, whether it has ``ivar`` or
``mask``, the units, and a description (if
``description=True``).
"""
spectrum_table = table.Table(
None, names=['name', 'std', 'unit', 'description',
'db_table', 'db_column', 'fits_extension'],
dtype=['S20', bool, 'S20', 'S500', 'S20', 'S20', 'S20'])
if self.parent:
spectrum_table.meta['release'] = self.parent.release
for spectrum in self:
unit = spectrum.unit.to_string()
spectrum_table.add_row((spectrum.name,
spectrum.has_std(),
unit,
spectrum.description,
spectrum.db_table,
spectrum.db_column(),
spectrum.fits_extension()))
if not description:
spectrum_table.remove_column('description')
if pprint:
spectrum_table.pprint(max_width=max_width, max_lines=1e6)
return
return spectrum_table
def write_csv(self, filename=None, path=None, overwrite=None, **kwargs):
''' Write the datamodel to a CSV '''
release = self.parent.release.lower().replace('-', '')
if not filename:
if isinstance(self.parent, DRPRSSDataModel):
filename = 'drp_rss_spectra_dm_{0}.csv'.format(release)
elif isinstance(self.parent, DRPCubeDataModel):
filename = 'drp_cube_spectra_dm_{0}.csv'.format(release)
else:
raise ValueError('invalid parent of type {!r}'.format(type(self.parent)))
if not path:
path = os.path.join(os.getenv("MARVIN_DIR"), 'docs', 'sphinx', '_static')
fullpath = os.path.join(path, filename)
table = self.to_table(**kwargs)
table.write(fullpath, format='csv', overwrite=overwrite)
class Spectrum(object):
"""Represents a extension in the DRP logcube file.
Parameters:
name (str):
The spectrum name. This is the internal name that Marvin will use
for this spectrum. It is different from the ``extension_name``
parameter, which must be identical to the extension name of the
spectrum in the logcube file.
extension_name (str):
The FITS extension containing this spectrum.
extension_wave (str):
The FITS extension containing the wavelength for this spectrum.
extension_std (str):
The FITS extension containing the standard deviation for this
spectrum.
extension_mask (str):
The FITS extension containing the mask for this spectrum.
db_table (str):
The DB table in which the spectrum is stored. Defaults to
``cube``.
unit (astropy unit or None):
The unit for this spectrum.
scale (float):
The scaling factor for the values of the spectrum.
formats (dict):
A dictionary with formats that can be used to represent the
spectrum. Default ones are ``latex`` and ``string``.
pixmask_flag : str
The name of the pixmask flag. Should be the full name, including the
``MANGA_`` part.
description (str):
A description for the spectrum.
"""
def __init__(self, name, extension_name, extension_wave=None, extension_std=None,
extension_mask=None, db_table='cube', unit=u.dimensionless_unscaled,
scale=1, formats={}, pixmask_flag=None, description=''):
self.name = name
self._extension_name = extension_name
self._extension_wave = extension_wave
self._extension_std = extension_std
self._extension_mask = extension_mask
self.pixmask_flag = pixmask_flag
self.db_table = db_table
self.formats = formats
self.description = description
self._parent = None
self.unit = u.CompositeUnit(scale, unit.bases, unit.powers)
@property
def parent(self):
"""Retrieves the parent."""
return self._parent
@parent.setter
def parent(self, value):
"""Sets the parent."""
assert isinstance(value, DRPCubeDataModel), 'parent must be a DRPCubeDataModel'
self._parent = value
def full(self):
"""Returns the name string."""
return self._extension_name.lower()
def has_std(self):
"""Returns True is the datacube has an std extension."""
return self._extension_std is not None
def has_mask(self):
"""Returns True is the datacube has an mask extension."""
return self._extension_mask is not None
def fits_extension(self, ext=None):
"""Returns the FITS extension name."""
assert ext is None or ext in ['std', 'mask'], 'invalid extension'
if ext is None:
return self._extension_name.upper()
elif ext == 'std':
if not self.has_std():
raise MarvinError('no std extension for spectrum {0!r}'.format(self.full()))
return self._extension_std.upper()
elif ext == 'mask':
if not self.has_mask():
raise MarvinError('no mask extension for spectrum {0!r}'.format(self.full()))
return self._extension_mask
def db_column(self, ext=None):
"""Returns the name of the DB column containing this datacube."""
return self.fits_extension(ext=ext).lower()
def __repr__(self):
return '<Spectrum {!r}, release={!r}, unit={!r}>'.format(
self.name, self.parent.release if self.parent else None, self.unit.to_string())
def __str__(self):
return self.full()
def to_string(self, mode='string'):
"""Return a string representation of the spectrum."""
if mode == 'latex':
if mode in self.formats:
latex = self.formats[mode]
else:
latex = self.to_string()
return latex
else:
if mode in self.formats:
string = self.formats[mode]
else:
string = self.name
return string
|
sdss/marvin
|
python/marvin/utils/datamodel/drp/base.py
|
Python
|
bsd-3-clause
| 24,506
|
[
"Brian"
] |
e6b7a909f08c124920a1fe3ec1206fef6de5191d2e7aa0114628a1bc1f7cb1be
|
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
import pytz
import logging
import smtplib
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.db import transaction
from django.db.models import Sum
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.exceptions import ItemNotFoundError
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.views import course_from_id
from student.models import CourseEnrollment, unenroll_done
from util.query import use_read_replica_if_available
from verify_student.models import SoftwareSecurePhotoVerification
from .exceptions import (InvalidCartItem, PurchasedCallbackException, ItemAlreadyInCartException,
AlreadyEnrolledInCourseException, CourseDoesNotExistException)
from microsite_configuration import microsite
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
('cart', 'cart'),
('purchased', 'purchased'),
('refunded', 'refunded'),
)
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk']) # pylint: disable=C0103
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
@classmethod
def user_cart_has_items(cls, user):
"""
Returns true if the user (anonymous user ok) has
a cart with items in it. (Which means it should be displayed.)
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
return cart.has_items()
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101
def has_items(self):
"""
Does the cart have any items in it?
"""
return self.orderitem_set.exists() # pylint: disable=E1101
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
for item in orderitems:
item.purchase_item()
# send confirmation e-mail
subject = _("Order Payment Confirmation")
message = render_to_string(
'emails/order_confirmation_email.txt',
{
'order': self,
'order_items': orderitems,
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO']
}
)
try:
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
send_mail(subject, message,
from_address, [self.user.email]) # pylint: disable=E1101
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id) # pylint: disable=E1101
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
Returns a pair. The first element is a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=the set of HTML instructions that item generates. The second is a
de-duplicated set of HTML instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
class OrderItem(models.Model):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.commit_on_success
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
        Called on each item in a purchased order to generate receipt instructions.
        Should return a pair of (pk_with_subclass, set of HTML instruction strings).
        The default implementation returns an empty set of instructions.
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
@property
def additional_instruction_text(self):
"""
Individual instructions for this order item.
Currently, only used for e-mails.
"""
return ''
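# A minimal sketch (not in the original source) of an OrderItem subclass that
# implements the interface above; the DonationItem class and its behavior are
# hypothetical:
#
#     class DonationItem(OrderItem):
#         """Example line item that only needs purchased_callback."""
#         @classmethod
#         def add_to_order(cls, order, amount, currency='usd'):
#             super(DonationItem, cls).add_to_order(order, currency=currency)
#             item, _created = cls.objects.get_or_create(order=order, user=order.user)
#             item.unit_cost = amount
#             item.line_desc = 'Donation'
#             item.save()
#             return item
#
#         def purchased_callback(self):
#             log.info("Donation of %s fulfilled", self.line_cost)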
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
course_id = models.CharField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [item.paidcourseregistration.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")]
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
        Raises an exception if the course does not exist, if it is already in the
        cart, or if the user is already enrolled. Returns the order item.
"""
# First a bunch of sanity checks
try:
course = course_from_id(course_id) # actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't
except ItemNotFoundError:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_id=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
        When purchased, this should enroll the user in the course. We assume that the
        course's enrollment-date settings are configured so that a user may enroll only
        if the (user.email, course_id) pair is found in CourseEnrollmentAllowed; otherwise
        requiring payment would be pointless, since there would be a clear back door.
"""
try:
course_loc = CourseDescriptor.id_to_location(self.course_id)
course_exists = modulestore().has_item(self.course_id, course_loc)
except ValueError:
raise PurchasedCallbackException(
"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))
if not course_exists:
raise PurchasedCallbackException(
"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))
CourseEnrollment.enroll(user=self.user, course_id=self.course_id, mode=self.mode)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost)) # pylint: disable=E1101
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = (_('Please visit your <a href="{dashboard_link}">dashboard</a> to see your new enrollments.')
.format(dashboard_link=reverse('dashboard')))
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates report for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
course_id = models.CharField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
return u"{} : {}".format(self.course_id, self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
course_id = models.CharField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(unenroll_done)
def refund_cert_callback(sender, course_enrollment=None, **kwargs):
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if not course_enrollment.refundable():
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.error("Matching CertificateItem not found while trying to refund. User %s, Course %s", course_enrollment.user, course_enrollment.course_id)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.status = 'refunded'
target_cert.order.save()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
raise InvalidCartItem(_("Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id))
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
course_name = course_from_id(course_id).display_name
item.line_desc = _("Certificate of Achievement, {mode_name} for course {course}").format(mode_name=mode_info.name,
course=course_name)
item.currency = currency
order.currency = currency
order.save()
item.save()
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
try:
verification_attempt = SoftwareSecurePhotoVerification.active_for_user(self.course_enrollment.user)
verification_attempt.submit()
        except Exception:  # pylint: disable=broad-except
            # log.exception records the traceback, so the exception object is not needed
            log.exception(
                "Could not submit verification attempt for enrollment {}".format(self.course_enrollment)
            )
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
@property
def single_item_receipt_template(self):
if self.mode == 'verified':
return 'shoppingcart/verified_cert_receipt.html'
else:
return super(CertificateItem, self).single_item_receipt_template
@property
def single_item_receipt_context(self):
course = course_from_id(self.course_id)
return {
"course_id" : self.course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"course_start_date_text": course.start_date_text,
"course_has_started": course.start > datetime.today().replace(tzinfo=pytz.utc),
}
@property
def additional_instruction_text(self):
return _("Note - you have up to 2 weeks into the course to unenroll from the Verified Certificate option "
"and receive a full refund. To receive your refund, contact {billing_email}. "
"Please include your order number in your e-mail. "
"Please do NOT include your credit card information.").format(
billing_email=settings.PAYMENT_SUPPORT_EMAIL)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
    # TODO combine these three methods into one (a hedged consolidation sketch
    # follows this class)
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
        - status 'purchased' and field_to_aggregate 'service_fee' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
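    # Example (assumed) usage of the aggregation above, for a hypothetical course id:
    #
    #     refunded_total = CertificateItem.verified_certificates_monetary_field_sum(
    #         'edX/Test101/2013_Fall', 'refunded', 'unit_cost')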
@classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        """Return the number of purchased verified certificates in course_id costing more than the minimum verified price."""
        return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
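# A hedged consolidation sketch (not in the original source) for the TODO above:
# the three verified_certificates_* classmethods share one filtered queryset and
# could be built on a single helper such as:
#
#     @classmethod
#     def _verified_certificates(cls, course_id, status):
#         return use_read_replica_if_available(
#             cls.objects.filter(course_id=course_id, mode='verified', status=status))
#
# verified_certificates_count() would then be _verified_certificates(...).count(),
# and the monetary sum would call .aggregate(Sum(field_to_aggregate)) on the same
# queryset.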
|
hkawasaki/kawasaki-aio8-1
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 27,423
|
[
"VisIt"
] |
5ed7383db9f1eed6c98e1fbab6fac87a69fb13d7e67e93e6c6311ee86ac15455
|
"""
Module for validating tests
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import object
from .tests import *
import netpyne.specs as specs
class ParamsObj(object):
"""
ParamsObj Class
    Holds one set of test parameters: a simulation configuration (SimConfig) plus network parameters (NetParams)
"""
    def __init__(self):
self.simConfig = specs.SimConfig() # object of class SimConfig to store simulation configuration
self.netParams = specs.NetParams() # object of class NetParams to store the network parameters
class RunNetPyneTests(object):
"""
RunNetPyneTests Class
    Loads the parameter maps and runs the NetPyNE validation tests against them
"""
    def __init__(self):
self.paramsMap = {}
self.netPyneTestObj = SimTestObj(verboseFlag = False)
self.loadTestsWithParams()
self.loadSimConfigTests()
self.runTestsWithParams()
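    # Example (assumed) usage: constructing the class loads the parameter maps
    # and runs every validation test in one go:
    #
    #     RunNetPyneTests()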
def loadSimConfigTests(self):
# print ( " loading tests ")
self.paramsMap["simConfig"] = {}
# # duration
# self.paramsMap["simConfig"]["durationTest"] = []
# simConfigParams = ParamsObj()
#
# # Simulation parameters
# simConfigParams.simConfig.duration = simConfigParams.simConfig.tstop = 100.0 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = "a" # Internal integration timestep to use
#
# simConfigParams.simConfig.seeds = {'con': 0, 'stim': 0, 'loc': 0}
#
# simConfigParams.simConfig.createNEURONObj = 1 # create HOC objects when instantiating network
# simConfigParams.simConfig.createPyStruct = 1 # create Python structure (simulator-independent) when instantiating network
# simConfigParams.simConfig.verbose = 0 # show detailed messages
#
# # Recording
# simConfigParams.simConfig.recordCells = ['all']
#
# # Column: v_pop_pre_0_RS_v: Pop: pop_pre; cell: 0; segment id: $oc.segment_id; segment name: soma; Neuron loc: soma(0.5); value: v (v)
# simConfigParams.simConfig.recordTraces['Volts_file__pop_pre_pop_pre_0_soma_v'] = {'bla':1,'sec':'soma','loc':0.5,'var':'v','conds':{'pop':'pop_pre'}}#,'cellLabel':0}}
# # Column: v_pop_pre_1_RS_v: Pop: pop_pre; cell: 1; segment id: $oc.segment_id; segment name: soma; Neuron loc: soma(0.5); value: v (v)
# simConfigParams.simConfig.recordTraces['Volts_file__pop_pre_pop_pre_1_soma_v'] = {'sec':'soma','loc':0.5,'var':'v','conds':{'pop':'pop_pre'}}#, 'cellLabel':1}}
# # Column: v_pop_post_0_RS_v: Pop: pop_post; cell: 0; segment id: $oc.segment_id; segment name: soma; Neuron loc: soma(0.5); value: v (v)
# simConfigParams.simConfig.recordTraces['Volts_file__pop_post_pop_post_0_soma_v'] = {'sec':'soma','loc':0.5,'var':'v','conds':{'pop':'pop_post'}}#, 'cellLabel':0}}
#
# simConfigParams.simConfig.recordStim = True # record spikes of cell stims
# simConfigParams.simConfig.recordStep = simConfigParams.simConfig.dt # Step size in ms to save data (eg. V traces, LFP, etc)
#
# # Analysis and plottingsimConfig.plotRaster = True # Whether or not to plot a raster
# simConfigParams.simConfig.analysis.plotTraces = {'include': ['all']}
#
# # Saving
# simConfigParams.simConfig.saveJson=1
# simConfigParams.simConfig.saveFileStep = simConfigParams.simConfig.dt # step size in ms to save data to disk
#
# self.paramsMap["simConfig"]["durationTest"].append(simConfigParams)
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 0.025 # Internal integration timestep to use
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# # invalid duration
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.duration = "s" # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 0.025 # Internal integration timestep to use
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["durationTest"].append(simConfigParams)
#
# # duration
# self.paramsMap["simConfig"]["dtTest"] = []
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 0.025 # Internal integration timestep to use
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["dtTest"].append(simConfigParams)
#
# # invalid dt
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = "s" # Internal integration timestep to use
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["dtTest"].append(simConfigParams)
#
# # hParams
# self.paramsMap["simConfig"]["hParamsTest"] = []
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.hParams = {'celsius': 6.3, 'clamp_resist': 0.001}
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 0.025 # Internal integration timestep to use
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["hParamsTest"].append(simConfigParams)
#
# # invalid hParams
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.hParams = {'celsius11': 6.3, 'clamp_resist': 0.001}
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 1 # Internal integration timestep to use
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["hParamsTest"].append(simConfigParams)
#
# # seeds
# self.paramsMap["simConfig"]["seedsTest"] = []
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.hParams = {'celsius': 6.3, 'clamp_resist': 0.001}
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 0.025 # Internal integration timestep to use
# simConfigParams.simConfig.seeds ={'conn': 1, 'stim': 1, 'loc': 1}
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["seedsTest"].append(simConfigParams)
#
# # invalid seeds
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.hParams = {'celsius11': 6.3, 'clamp_resist': 0.001}
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 0.025 # Internal integration timestep to use
# # simConfigParams.simConfig.seeds ={'con': 1, 'stim': 1, 'loc': 1}
# simConfigParams.simConfig.seeds ="s"
#
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["seedsTest"].append(simConfigParams)
# # invalid seeds
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.hParams = {'celsius11': 6.3, 'clamp_resist': 0.001}
# simConfigParams.simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
# simConfigParams.simConfig.dt = 0.025 # Internal integration timestep to use
# simConfigParams.simConfig.seeds ={'con': 1, 'stim': 1, 'loc': 1}
# #simConfigParams.simConfig.seeds ="s"
#
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["seedsTest"].append(simConfigParams)
# self.paramsMap["simConfig"]["plotRasterTest"] = []
# # invalid seeds
# simConfigParams = ParamsObj()
# simConfigParams.simConfig.verbose = False # Show detailed messages
# simConfigParams.simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
# simConfigParams.simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
# simConfigParams.simConfig.filename = 'model_output' # Set file output name
# simConfigParams.simConfig.savePickle = False # Save params, network and sim output to pickle file
#
# simConfigParams.simConfig.analysis['plotRaster'] = {'bla':1,'syncLines': True} # Plot a raster
# simConfigParams.simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
# simConfigParams.simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
#
# self.paramsMap["simConfig"]["plotRasterTest"].append(simConfigParams)
def loadTestsWithParams(self):
# print ( " loading tests ")
self.paramsMap["pop"] = {}
self.paramsMap["net"] = {}
self.paramsMap["conn"] = {}
self.paramsMap["cell"] = {}
self.paramsMap["stimSource"] = {}
self.paramsMap["stimTarget"] = {}
self.paramsMap["pop"]["cellModelTest"] = []
cellModelParams = ParamsObj()
cellModelParams.netParams.popParams['validCellModelParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'numCells': 50} # add dict with params for this pop
self.paramsMap["pop"]["cellModelTest"].append(cellModelParams)
cellModelParams = ParamsObj()
cellModelParams.netParams.popParams['invalidCellModelParams'] = {'cellType': 'PYR', 'numCells': 50} # add dict with params for this pop
self.paramsMap["pop"]["cellModelTest"].append(cellModelParams)
#
# self.paramsMap["pop"]["volumeParamsTest"] = []
#
# volumeParams = ParamsObj()
# volumeParams.netParams.popParams['validVolumeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50} # add dict with params for this pop
# self.paramsMap["pop"]["volumeParamsTest"].append(volumeParams)
#
# volumeParams = ParamsObj()
# volumeParams.netParams.popParams['invalidVolumeParams'] = {'cellType': 'PYR', 'cellModel': 'HH'} # add dict with params for this pop
# self.paramsMap["pop"]["volumeParamsTest"].append(volumeParams)
#
# self.paramsMap["pop"]["xNormRangeParamsTest"] = []
#
# params = ParamsObj()
# params.netParams.popParams['validxNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'xnormRange' : [0.6,0.9]} # add dict with params for this pop
# self.paramsMap["pop"]["xNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidxNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'xnormRange' : 0.6} # add dict with params for this pop
# self.paramsMap["pop"]["xNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidxNormRangeParams1'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'xnormRange' : [6,10]} # add dict with params for this pop
# self.paramsMap["pop"]["xNormRangeParamsTest"].append(params)
#
# self.paramsMap["pop"]["yNormRangeParamsTest"] = []
#
# params = ParamsObj()
# params.netParams.popParams['validyNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'ynormRange' : [0.6,0.9]} # add dict with params for this pop
# self.paramsMap["pop"]["yNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidyNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'ynormRange' : 0.6} # add dict with params for this pop
# self.paramsMap["pop"]["yNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidyNormRangeParams1'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'ynormRange' : [6,10]} # add dict with params for this pop
# self.paramsMap["pop"]["yNormRangeParamsTest"].append(params)
#
# self.paramsMap["pop"]["zNormRangeParamsTest"] = []
#
# params = ParamsObj()
# params.netParams.popParams['validzNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'znormRange' : [0.6,0.9]} # add dict with params for this pop
# self.paramsMap["pop"]["zNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidzNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'znormRange' : 0.6} # add dict with params for this pop
# self.paramsMap["pop"]["zNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidzNormRangeParams1'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'znormRange' : [6,10]} # add dict with params for this pop
# self.paramsMap["pop"]["zNormRangeParamsTest"].append(params)
#
# self.paramsMap["pop"]["zNormRangeParamsTest"] = []
#
# params = ParamsObj()
# params.netParams.popParams['validzNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'znormRange' : [0.6,0.9]} # add dict with params for this pop
# self.paramsMap["pop"]["zNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidzNormRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'znormRange' : 0.6} # add dict with params for this pop
# self.paramsMap["pop"]["zNormRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidzNormRangeParams1'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'znormRange' : [6,10]} # add dict with params for this pop
# self.paramsMap["pop"]["zNormRangeParamsTest"].append(params)
#
self.paramsMap["pop"]["xRangeParamsTest"] = []
params = ParamsObj()
params.netParams.sizeX = 70 # max size for network
params.netParams.popParams['validxRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'xRange' : [30,60]} # add dict with params for this pop
self.paramsMap["pop"]["xRangeParamsTest"].append(params)
# params = ParamsObj()
# params.netParams.sizeX = 70.0 # max size for network
# params.netParams.popParams['invalidxRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'xRange' : [40,90]} # add dict with params for this pop
# self.paramsMap["pop"]["xRangeParamsTest"].append(params)
#
# self.paramsMap["pop"]["yRangeParamsTest"] = []
#
# params = ParamsObj()
# params.netParams.sizeY = 70 # max size for network
# params.netParams.popParams['validyRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'yRange' : [30,60]} # add dict with params for this pop
# self.paramsMap["pop"]["yRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.sizeY = 70 # max size for network
# params.netParams.popParams['invalidyRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'yRange' : [40,90]} # add dict with params for this pop
# self.paramsMap["pop"]["yRangeParamsTest"].append(params)
#
# self.paramsMap["pop"]["zRangeParamsTest"] = []
#
# params = ParamsObj()
# params.netParams.sizeZ = 70 # max size for network
# params.netParams.popParams['validzRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'zRange' : [30,60]} # add dict with params for this pop
# self.paramsMap["pop"]["zRangeParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.sizeZ = 70 # max size for network
# params.netParams.popParams['invalidzRangeParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50, 'zRange' : [40,90]} # add dict with params for this pop
# self.paramsMap["pop"]["zRangeParamsTest"].append(params)
#
# self.paramsMap["pop"]["popStimParamsTest"] = []
#
# params = ParamsObj()
# params.netParams.popParams['validPopStimParams1'] = {'cellModel': 'IntFire2', 'taum': 100, 'noise': 0.5, 'numCells': 100} # Intfire2
# self.paramsMap["pop"]["popStimParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['validPopStimParams2'] = {'cellModel': 'NetStim', 'rate': 100, 'noise': 0.5, 'numCells': 100} # NetsStim
# self.paramsMap["pop"]["popStimParamsTest"].append(params)
#
# params = ParamsObj()
# # create custom list of spike times
# spkTimes = range(0,1000,20) + [138, 155,270]
# # create list of pulses (each item is a dict with pulse params)
# pulses = [{'start': 10, 'end': 100, 'rate': 200, 'noise': 0.5},{'start': 400, 'end': 500, 'rate': 1, 'noise': 0.0}]
# params.netParams.popParams['validPopStimParams3'] = {'cellModel': 'VecStim', 'numCells': 100, 'spkTimes': spkTimes, 'pulses': pulses} # VecStim with spike times
# self.paramsMap["pop"]["popStimParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidPopStimParams1'] = {'cellModel': 'IntFire2', 'taum': 100, 'noise': 2, 'numCells': 100} # Intfire2
# self.paramsMap["pop"]["popStimParamsTest"].append(params)
#
# params = ParamsObj()
# params.netParams.popParams['invalidPopStimParams2'] = {'cellModel': 'NetStim', 'rate': '2', 'noise': 0.5, 'numCells': 100} # NetsStim
# self.paramsMap["pop"]["popStimParamsTest"].append(params)
#
# params = ParamsObj()
# # create custom list of spike times
# spkTimes = 1000
# # create list of pulses (each item is a dict with pulse params)
# pulses = [{'start': 10, 'end': 100, 'rate': 200, 'noise': 0.5},{'start': 400, 'end': 500, 'rate': 1, 'noise': 0.0}]
# params.netParams.popParams['invalidPopStimParams3'] = {'cellModel': 'VecStim', 'numCells': 100, 'spkTimes': spkTimes, 'pulses': pulses} # VecStim with spike times
# self.paramsMap["pop"]["popStimParamsTest"].append(params)
#
# params = ParamsObj()
# # create custom list of spike times
# spkTimes = range(0,1000,20) + [138, 155,270]
# # create list of pulses (each item is a dict with pulse params)
# pulses = [{'start': 10, 'end ': 100, 'rate': 200, 'noise': 0.5}, {'start': 400, 'end': 500, 'rate': 1, 'noise': 0.0}]
# params.netParams.popParams['invalidPopStimParams4'] = {'cellModel': 'VecStim', 'numCells': 100, 'spkTimes': spkTimes, 'pulses': pulses} # VecStim with spike times
# self.paramsMap["pop"]["popStimParamsTest"].append(params)
#
# #net params test
self.paramsMap["net"]["sizeXParamsTest"] = []
params = ParamsObj()
params.netParams.sizeX = 70 # max size for network
self.paramsMap["net"]["sizeXParamsTest"].append(params)
params = ParamsObj()
params.netParams.sizeX = "abc" # max size for network
self.paramsMap["net"]["sizeXParamsTest"].append(params)
params = ParamsObj()
params.netParams.sizeX = -44 # max size for network
self.paramsMap["net"]["sizeXParamsTest"].append(params)
#
# self.paramsMap["net"]["shapeTest"] = []
#
# params = ParamsObj()
# params.netParams.shape = "cuboid" # max size for network
# self.paramsMap["net"]["shapeTest"].append(params)
#
# params = ParamsObj()
# params.netParams.shape = "ellipsoid" # max size for network
# self.paramsMap["net"]["shapeTest"].append(params)
#
# params = ParamsObj()
# params.netParams.shape = "cylinder" # max size for network
# self.paramsMap["net"]["shapeTest"].append(params)
#
# params = ParamsObj()
# params.netParams.shape = "sphere" # max size for network
# self.paramsMap["net"]["shapeTest"].append(params)
#
# #
# # # cell params test
# self.paramsMap["cell"]["condsTest"] = []
#
# # valid cell conds rule
# params = ParamsObj()
# cellRule = {'conds': {'cellType': 'E2', 'cellModel': 'simple'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validConds'] = cellRule # add dict with params for this pop
# #print ( str(cellRule["conds"]) )
# self.paramsMap["cell"]["condsTest"].append(params)
#
# # valid cell conds rule
# params = ParamsObj()
# cellRule = {'conds': 'test', 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['inValidConds1'] = cellRule # add dict with params for this pop
# #print ( str(cellRule["conds"]) )
# self.paramsMap["cell"]["condsTest"].append(params)
#
# # invalid cell conds rule
# params = ParamsObj()
# cellRule = { 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['inValidConds2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["condsTest"].append(params)
#
# # # cell params test
# self.paramsMap["cell"]["secsTest"] = []
#
# # invalid sec type rule
# params = ParamsObj()
# cellRule = { 'secs': 'test'} # cell rule dict
# params.netParams.cellParams['inValidSecs1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["secsTest"].append(params)
#
# # cell types test
# self.paramsMap["cell"]["cellTypesTest"] = []
#
# # valid cell type rule
# params = ParamsObj()
# params.netParams.popParams['validCellModelParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'numCells': 50} # add dict with params for this pop
# cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validCellTypes'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["cellTypesTest"].append(params)
#
# # invalid cell type rule
# params = ParamsObj()
# params.netParams.popParams['validCellModelParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'numCells': 50} # add dict with params for this pop
# cellRule = { 'conds': {'cellType': 'PY1'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['inValidCellTypes'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["cellTypesTest"].append(params)
#
# # cell params test
# self.paramsMap["cell"]["cellModelsTest"] = []
#
# # valid cell model rule
# params = ParamsObj()
# params.netParams.popParams['validCellModelParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'numCells': 50} # add dict with params for this pop
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validCellModel'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["cellModelsTest"].append(params)
#
# # invalid cell model rule
# params = ParamsObj()
# params.netParams.popParams['validCellModelParams'] = {'cellType': 'PYR', 'cellModel': 'HH', 'numCells': 50} # add dict with params for this pop
# cellRule = { 'conds': {'cellModel': 'H1' }, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['inValidCellModel'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["cellModelsTest"].append(params)
# #
# # geom test
# self.paramsMap["cell"]["geomTest"] = []
# #
# # # valid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validGeom'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = { 'conds': {'cellModel': 'H1' }, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = { 'mechs': {}} # soma params dict
# params.netParams.cellParams['inValidGeom'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# #valid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validGeom1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # valid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0, 'pt3d' : [] } # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validGeom2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # valid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'pt3d' : [] } # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validGeom3'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # valid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'pt3d' : [[1,2,3,4],[3,4,5,6]] } # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['validGeom4'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'pt3d' : 2.3 } # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['invalidGeom1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam' : 2.3 } # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['invalidGeom2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'xy' : 2.3 } # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['invalidGeom3'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['invalidGeom4'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'pt3d':[2,3,4]} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['invalidGeom5'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'pt3d':[[2,3,4],[3,4,5]]} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['invalidGeom6'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # invalid geom rule
# params = ParamsObj()
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'pt3d':[[2,3,4,4],[3,4,"a",3]]} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
# params.netParams.cellParams['invalidGeom7'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["geomTest"].append(params)
#
# # # topology test
# self.paramsMap["cell"]["topologyTest"] = []
#
# # valid topology rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['validTopology1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["topologyTest"].append(params)
#
# # invalid topology rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidTopology1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["topologyTest"].append(params)
#
# # invalid topology rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidTopology2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["topologyTest"].append(params)
#
# # invalid topology rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidTopology3'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["topologyTest"].append(params)
#
# # invalid topology rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma1', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidTopology4'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["topologyTest"].append(params)
#
# # invalid topology rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 2.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidTopology5'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["topologyTest"].append(params)
#
# # invalid topology rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 2.0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidTopology6'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["topologyTest"].append(params)
#
# # mechs test
# self.paramsMap["cell"]["mechsTest"] = []
#
# # valid mechs rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['validMechs1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["mechsTest"].append(params)
#
# # invalid mechs rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidMechs1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["mechsTest"].append(params)
#
# # invalid mechs rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357} # dend mechanisms
#
# params.netParams.cellParams['invalidMechs2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["mechsTest"].append(params)
#
# # invalid mechs rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl1': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidMechs3'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["mechsTest"].append(params)
#
# # invalid mechs rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e1': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidMechs4'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["mechsTest"].append(params)
#
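# # The mechs cases hinge on each mechanism carrying exactly its expected
# # parameter names. A sketch of that check, with the expected-key sets
# # assumed from the rules above rather than queried from NEURON:
# EXPECTED_MECH_PARAMS = {'hh': {'gnabar', 'gkbar', 'gl', 'el'}, 'pas': {'g', 'e'}}
# def mech_ok(name, mech):
#     expected = EXPECTED_MECH_PARAMS.get(name)
#     return expected is not None and set(mech) == expected  # typos and missing keys fail
#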
# # ions test
# self.paramsMap["cell"]["ionsTest"] = []
#
# # valid ions rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['soma']['mechs']['k_ion'] = {'i':10,'e':20,'o':30} # potassium ions
# cellRule['secs']['soma']['mechs']['na_ion'] = {'o':3,'i':4,'e':5} # sodium ions
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['validIons1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["ionsTest"].append(params)
#
# # invalid ions rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['soma']['mechs']['k_ion'] = {'x':10} # potassium ions
# cellRule['secs']['soma']['mechs']['na_ion'] = {'y':3} # sodium ions
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidIons1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["ionsTest"].append(params)
#
# # invalid ions rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['soma']['mechs']['k_ion'] = {'i':10} # potassium ions
# cellRule['secs']['soma']['mechs']['na_ion'] = {'o':3} # sodium ions
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'e': 0.0000357, 'g':0.3} # dend mechanisms
#
# params.netParams.cellParams['invalidIons2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["ionsTest"].append(params)
#
# # invalid ions rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl1': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['soma']['mechs']['mg_ion'] = {'mg1':10} # mg ions
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidIons3'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["ionsTest"].append(params)
#
# # invalid ions rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e1': -70} # dend mechanisms
#
# params.netParams.cellParams['invalidIons4'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["ionsTest"].append(params)
#
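# # One reading of the ion cases above: an ion entry must supply exactly the
# # NEURON ion variables i, e and o, so {'x': 10}, a lone {'i': 10} and
# # {'mg1': 10} all fail. A sketch under that assumption:
# def ion_ok(ion):
#     return set(ion) == {'i', 'e', 'o'}
#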
# # pointps test
# self.paramsMap["cell"]["pointpsTest"] = []
#
# # valid pointps rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}
#
# params.netParams.cellParams['validPointPs1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["pointpsTest"].append(params)
#
# # invalid pointps rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = { 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}
#
# params.netParams.cellParams['invalidPointPs1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["pointpsTest"].append(params)
#
# # invalid pointps rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1,'synList' :'q'}
#
# params.netParams.cellParams['invalidPointPs2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["pointpsTest"].append(params)
#
# # invalid pointps rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1,'loc' :4}
#
# params.netParams.cellParams['invalidPointPs3'] = cellRule # add dict with params for this cell rule
# self.paramsMap["cell"]["pointpsTest"].append(params)
#
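# # The point-process cases require a 'mod' entry plus sane optional fields;
# # a rough sketch of that intent (field names taken from the tests above):
# def pointp_ok(pp):
#     if 'mod' not in pp:
#         return False                           # invalidPointPs1
#     if 'synList' in pp and not isinstance(pp['synList'], list):
#         return False                           # invalidPointPs2 ('synList': 'q')
#     return 0.0 <= pp.get('loc', 0.5) <= 1.0    # invalidPointPs3 ('loc': 4)
#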
# # secList test
# self.paramsMap["cell"]["secListTest"] = []
#
# # valid secList rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
#
# cellRule['secList'] = {'apicdend': ['soma','dend'], 'basaldend':['dend']}
#
# params.netParams.cellParams['validSecList'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["secListTest"].append(params)
#
# # invalid secList rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
#
# cellRule['secList'] = {'apicdend': ['somax','dend'], 'basaldend':['dend']}
#
# params.netParams.cellParams['invalidSecList1'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["secListTest"].append(params)
#
# # invalid secList rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
#
# cellRule['secList'] = {'apicdend': 'soma', 'basaldend':['dend']}
#
# params.netParams.cellParams['invalidSecList2'] = cellRule # add dict with params for this pop
# self.paramsMap["cell"]["secListTest"].append(params)
#
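# # The secList cases check that each named list really is a list and that
# # every entry names a defined section; compactly (our own helper):
# def seclist_ok(sec_lists, secs):
#     return all(isinstance(lst, list) and set(lst) <= set(secs)
#                for lst in sec_lists.values())
#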
# # spikeGenLoc test
# self.paramsMap["cell"]["spikeGenLocTest"] = []
#
# # valid spikeGenLoc rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
#
# cellRule['secs']['axon'] = {'geom': {}, 'topol': {}, 'mechs': {}}
# cellRule['secs']['axon']['spikeGenLoc'] = 0.7
#
# params.netParams.cellParams['validSpikeGenLoc'] = cellRule # add dict with params for this cell rule
# self.paramsMap["cell"]["spikeGenLocTest"].append(params)
#
# # invalid spikeGenLoc rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['pointps']['Izhi'] = {'mod':'Izhi2007b', 'C':1, 'k':0.7, 'vr':-60, 'vt':-40, 'vpeak':35, 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
#
# cellRule['secs']['axon'] = {'geom': {}, 'topol': {}, 'mechs': {}}
# cellRule['secs']['axon']['spikeGenLoc'] = 1.7
#
# params.netParams.cellParams['invalidSpikeGenLoc'] = cellRule # add dict with params for this cell rule
# self.paramsMap["cell"]["spikeGenLocTest"].append(params)
#
#
# # conn test
# self.paramsMap["conn"]["preCondsTest"] = []
#
# # valid preConds rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'delay': 5} # delay
#
# params.netParams.connParams['validPreConds1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["preCondsTest"].append(params)
#
# # invalid preConds rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': 2.3 , 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'delay': 5} # delay
#
# params.netParams.connParams['invalidPreConds1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["preCondsTest"].append(params)
#
# # conn test
# self.paramsMap["conn"]["postCondsTest"] = []
#
# # valid postConds rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'delay': 5} # delay
#
# params.netParams.connParams['validPostConds1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["postCondsTest"].append(params)
#
# # invalid postConds rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': 2.3,
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'delay': 5} # delay
#
# params.netParams.connParams['invalidPostConds1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["postCondsTest"].append(params)
#
# # loc (optional) - location of the target synaptic mechanism (e.g. 0.3);
# # defaults to 0.5 if omitted.
# # With a list of synMechs, loc can be a single value for all, or a list with one loc per synMech (e.g. for 2 synMechs: [0.4, 0.7]).
# # With synsPerConn > 1, loc can be a single value for all synapses, or a list with one loc per synapse (e.g. for synsPerConn = 3: [0.4, 0.5, 0.7]).
# # With both a list of synMechs and synsPerConn > 1, loc can be a 2D list, one row per synMech and one entry per synapse (e.g. for 2 synMechs and synsPerConn = 3: [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]]).
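# #
# # A sketch of the shape rule just described (illustrative helper, not the
# # netpyne API; range checking is a separate concern): a scalar is always
# # fine; a flat list must match the number of synMechs or synapses; a 2D
# # list needs both a synMech list and synsPerConn > 1, sized accordingly.
# def loc_shape_ok(loc, syn_mech, syns_per_conn):
#     n_mechs = len(syn_mech) if isinstance(syn_mech, list) else 1
#     if not isinstance(loc, list):
#         return True                             # single value for all
#     if loc and isinstance(loc[0], list):        # 2D: one row per synMech
#         return (n_mechs > 1 and syns_per_conn > 1
#                 and len(loc) == n_mechs
#                 and all(len(row) == syns_per_conn for row in loc))
#     return len(loc) in (n_mechs, syns_per_conn)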
#
# # conn test
# self.paramsMap["conn"]["connsLocTest"] = []
#
# # valid locs rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : 1,
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnsLoc0'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsLocTest"].append(params)
#
# # valid locs rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [0.5,0.7],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnsLoc1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsLocTest"].append(params)
#
# # valid locs rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnsLoc2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsLocTest"].append(params)
#
# # invalid locs rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnsLoc1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsLocTest"].append(params)
#
# # invalid locs rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnsLoc2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsLocTest"].append(params)
#
# # invalid locs rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : 1.5,
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnsLoc3'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsLocTest"].append(params)
#
# # conn test
# self.paramsMap["conn"]["connsWeightTest"] = []
#
# # valid weights rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight' : 1,
# 'loc': 0.0, # location of synapse
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnsWeight0'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsWeightTest"].append(params)
#
# # valid weights rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight' : [0.5,0.7],
# 'loc': 1.0,
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnsWeight1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsWeightTest"].append(params)
#
# # valid weights rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'loc': 0.0, # location of synapse
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnsWeight2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsWeightTest"].append(params)
#
# # invalid weights rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'loc': 0.0, # location of synapse
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnsWeight1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsWeightTest"].append(params)
#
# # invalid weights rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'loc': 0.0, # location of synapse
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnsWeight2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsWeightTest"].append(params)
#
# # invalid weights rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight' : 1.5,
# 'loc': 0.0, # location of synapse
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnsWeight3'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsWeightTest"].append(params)
#
# # conn test
# self.paramsMap["conn"]["connsDelayTest"] = []
#
# # valid delays rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'delay' : 1,
# 'loc': 0.0, # location of synapse
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# } # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validconnsDelay0'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsDelayTest"].append(params)
#
# # valid delays rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'delay' : [0.5,0.7],
# 'loc': 1.0,
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# } # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validconnsDelay1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsDelayTest"].append(params)
#
# # valid delays rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'delay' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'loc': 0.0, # location of synapse
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 3,
# } # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validconnsDelay2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsDelayTest"].append(params)
#
# # invalid delays rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'delay' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'loc': 0.0, # location of synapse
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# } # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidconnsDelay1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsDelayTest"].append(params)
#
# # invalid delays rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'delay' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'loc': 0.0, # location of synapse
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 3,
# } # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidconnsDelay2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsDelayTest"].append(params)
#
# # invalid delays rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'delay' : 1.5,
# 'loc': 0.0, # location of synapse
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# } # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidconnsDelay3'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsDelayTest"].append(params)
#
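# # weight and delay follow the same list-shape rule as loc, so the two
# # groups above can be screened with the same helper, e.g.:
# # loc_shape_ok([[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]], ['AMPA', 'NMDA'], 3) -> True
# # loc_shape_ok([[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]], 'AMPA', 3)           -> False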
#
# # conn test
# self.paramsMap["conn"]["synMechsTest"] = []
#
# # valid synMechs rule
# params = ParamsObj()
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [0.5,0.7],
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# params.netParams.connParams['validSynMechs1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["synMechsTest"].append(params)
#
# # valid synMechs rule
# params = ParamsObj()
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# params.netParams.connParams['validSynMechs2'] = connRule # add dict with params for this conn rule
# self.paramsMap["conn"]["synMechsTest"].append(params)
#
# # invalid synMechs rule
# params = ParamsObj()
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'weight': 0.0, # weight of each connection
# 'synMech': 'XYZ', # nonexistent synMech label
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# params.netParams.connParams['invalidSynMechs1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["synMechsTest"].append(params)
#
# # invalid synMechs rule
# params = ParamsObj()
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'loc' : [[0.2, 0.3, 0.5], [0.5, 0.6, 0.7]],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['XYZ','ABC'], # nonexistent synMech labels
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# params.netParams.connParams['invalidSynMechs2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["synMechsTest"].append(params)
#
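# # The synMech cases only ask whether every referenced label was defined in
# # synMechParams; a one-liner covers both the 'XYZ' and ['XYZ', 'ABC'] cases:
# def synmech_ok(syn_mech, syn_mech_params):
#     labels = syn_mech if isinstance(syn_mech, list) else [syn_mech]
#     return all(label in syn_mech_params for label in labels)
#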
# # conn test
# self.paramsMap["conn"]["popLabelsTest"] = []
#
# # valid pop labels rule
# params = ParamsObj()
#
# params.netParams.popParams['popLabel1'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50} # add dict with params for this pop
# params.netParams.popParams['popLabel2'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50} # add dict with params for this pop
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'popLabel1'}, 'postConds': {'popLabel': 'popLabel2'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'delay': 5} # delay
#
# params.netParams.connParams['validPopLabels1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["popLabelsTest"].append(params)
#
# # invalid pop labels rule
# params = ParamsObj()
#
# params.netParams.popParams['popLabel1'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50} # add dict with params for this pop
# params.netParams.popParams['popLabel2'] = {'cellType': 'PYR', 'cellModel': 'HH', 'density' : 0.8, 'numCells': 50} # add dict with params for this pop
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'popLabel2'}, 'postConds': {'popLabel': 'popLabel3'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'delay': 5} # delay
#
# params.netParams.connParams['invalidPopLabels1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["popLabelsTest"].append(params)
#
# # conn test
# self.paramsMap["conn"]["secListTest"] = []
#
# # valid secList rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# cellRule['secList'] = {'apicdend': ['soma','dend'], 'basaldend':['dend']}
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'popLabel2'}, 'postConds': {'popLabel': 'popLabel3'},
# 'sec': 'apicdend',
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# params.netParams.cellParams["cellParams1"] = cellRule
#
# params.netParams.connParams['validSecList1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["secListTest"].append(params)
#
# # invalid secList rule
# params = ParamsObj()
#
# cellRule = {'conds': {'cellModel': 'HH'}, 'secs': {}} # cell rule dict
# cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict
# cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry
# cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism
#
# cellRule['secs']['dend'] = {'geom': {}, 'topol': {}, 'mechs': {}} # dend params dict
# cellRule['secs']['dend']['geom'] = {'diam': 5.0, 'L': 150.0, 'Ra': 150.0, 'cm': 1} # dend geometry
# cellRule['secs']['dend']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0} # dend topology
# cellRule['secs']['dend']['mechs']['pas'] = {'g': 0.0000357, 'e': -70} # dend mechanisms
#
# cellRule['secList'] = {'apicdend': ['soma','dend'], 'basaldend':['dend']}
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'popLabel2'}, 'postConds': {'popLabel': 'popLabel3'},
# 'sec': 'apicdend1',
# 'weight': 0.0, # weight of each connection
# 'synMech': 'inh', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# params.netParams.cellParams["cellParams1"] = cellRule
#
# params.netParams.connParams['invalidSecList1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["secListTest"].append(params)
#
# # conn test
# self.paramsMap["conn"]["connListTest"] = []
#
# # valid connList rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'connList' : [[0,1],[2,1]],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnList1'] = connRule # add dict with params for this conn rule
# self.paramsMap["conn"]["connListTest"].append(params)
#
# # valid connList rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'connList' : [[[0.1,0.2], [0.1,0.3], [0.1,0.5]], [[0.5,0.1], [0.1,0.6], [0.1,0.7]]],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 3,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnList2'] = connRule # add dict with params for this conn rule
# self.paramsMap["conn"]["connListTest"].append(params)
#
# # valid connList rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'connList' : [0.1,0.2],
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5} # delay
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['validConnList3'] = connRule # add dict with params for this conn rule
# self.paramsMap["conn"]["connListTest"].append(params)
#
# # invalid connList rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'connList' : [[[0.1,0.2], [0.1,0.3], [0.1,0.5]], [[0.5,0.1], [0.1,0.6], [0.1,0.7]]],
# 'weight': 0.0, # weight of each connection
# 'synMech': ['AMPA','NMDA'], # target AMPA and NMDA synapses
# 'synsPerConn': 1,
# 'delay': 5}
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnList1'] = connRule # add dict with params for this conn rule
# self.paramsMap["conn"]["connListTest"].append(params)
#
# # invalid connList rule
# params = ParamsObj()
#
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'connList' : [[0.1,0.2], [0.1,0.3]],
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5}
#
# # Synaptic mechanism parameters
# params.netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.05, 'tau2': 5.3, 'e': 0} # AMPA
# params.netParams.synMechParams['NMDA'] = {'mod': 'Exp2Syn', 'tau1': 0.15, 'tau2': 15, 'e': 0} # NMDA
# params.netParams.synMechParams['GABAA'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAA
# params.netParams.synMechParams['GABAB'] = {'mod': 'Exp2Syn', 'tau1': 0.07, 'tau2': 9.1, 'e': -80} # GABAB
#
# params.netParams.connParams['invalidConnList2'] = connRule # add dict with params for this conn rule
# self.paramsMap["conn"]["connListTest"].append(params)
#
# self.paramsMap["conn"]["connsHierarchyTest"] = []
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'probability':0.5,
# 'shape': {'switchOnOff': [200, 800], 'pulseType': 'square', 'pulsePeriod': 100, 'pulseWidth': 50},
# }
#
# params.netParams.connParams['validHierarchy1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsHierarchyTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'probability':0.5,
# 'convergence': 0.5,
# }
#
# params.netParams.connParams['invalidHierarchy1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsHierarchyTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'convergence': 0.5,
# 'divergence':0.5,
# }
#
# params.netParams.connParams['invalidHierarchy2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsHierarchyTest"].append(params)
#
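# # One reading of the three hierarchy cases: probability, convergence and
# # divergence are mutually exclusive ways of specifying connectivity, so at
# # most one may appear in a rule. Sketch:
# def hierarchy_ok(rule):
#     return sum(k in rule for k in ('probability', 'convergence', 'divergence')) <= 1
#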
# self.paramsMap["conn"]["connsShapeTest"] = []
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'shape': {'switchOnOff': [200, 800], 'pulseType': 'square', 'pulsePeriod': 100, 'pulseWidth': 50},
# }
#
# params.netParams.connParams['validShape1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsShapeTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'shape': {'switchOnOff': 200, 'pulseType': 'square', 'pulsePeriod': 100, 'pulseWidth': 50},
# }
#
# params.netParams.connParams['invalidShape1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsShapeTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'shape': {'switchOnOff': ['200','300'], 'pulseType': 'square', 'pulsePeriod': 100, 'pulseWidth': 50},
# }
#
# params.netParams.connParams['invalidShape2'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsShapeTest"].append(params)
#
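# # The shape cases turn on switchOnOff being a list of numbers (a bare
# # scalar and a list of strings both fail):
# def shape_ok(shape):
#     s = shape.get('switchOnOff', [])
#     return isinstance(s, list) and all(isinstance(t, (int, float)) for t in s)
#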
# self.paramsMap["conn"]["connsPlasticityTest"] = []
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'plasticity': {'mech': 'STDP', 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 ,'tauhebb': 10}},
# }
#
# params.netParams.connParams['validPlasticity1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsPlasticityTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'plasticity': { 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 ,'tauhebb': 10}},
# }
#
# params.netParams.connParams['invalidPlasticity1'] = connRule # add dict with params for this pop
# self.paramsMap["conn"]["connsPlasticityTest"].append(params)
#
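# # The plasticity pair differs only in the presence of 'mech', so the check
# # reduces to (sketch):
# def plasticity_ok(plast):
#     return 'mech' in plast and 'params' in plast
#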
# self.paramsMap["stimSource"]["stimSourceTest"] = []
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target AMPA synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'plasticity': { 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 ,'tauhebb': 10}},
# }
#
# params.netParams.stimSourceParams['Input_1'] = {'type': 'IClamp', 'delay': 10, 'dur': 800, 'amp': 'uniform(0.05,0.5)'}
# params.netParams.stimSourceParams['Input_2'] = {'type': 'VClamp', 'dur':[0,1,1], 'amp':[1,1,1],'gain':1, 'rstim':0, 'tau1':1, 'tau2':1, 'i':1}
# params.netParams.stimSourceParams['Input_3'] = {'type': 'AlphaSynapse', 'onset': 'uniform(1,500)', 'tau': 5, 'gmax': 'post_ynorm', 'e': 0}
# params.netParams.stimSourceParams['Input_4'] = {'type': 'NetStim', 'interval': 'uniform(20,100)', 'number': 1000, 'start': 5, 'noise': 0.1}
#
# # Stimulation mapping parameters
# params.netParams.stimTargetParams['Input1->PYR'] = {
# 'source': 'Input_1',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'pop':'PYR', 'cellList': range(8)}}
# params.netParams.stimTargetParams['Input3->Basket'] = {
# 'source': 'Input_3',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'cellType':'Basket'}}
# params.netParams.stimTargetParams['Input4->PYR3'] = {
# 'source': 'Input_4',
# 'sec':'soma',
# 'loc': 0.5,
# 'weight': '0.1+gauss(0.2,0.05)',
# 'delay': 1,
# 'conds': {'pop':'PYR3', 'cellList': [0,1,2,5,10,14,15]}}
#
# params.netParams.connParams['validStimSource1'] = connRule # add dict with params for this pop
# self.paramsMap["stimSource"]["stimSourceTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'plasticity': { 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 ,'tauhebb': 10}},
# }
#
# params.netParams.stimSourceParams['Input_1'] = {'type': 'XYClamp', 'delay': 10, 'dur': 800, 'amp': 'uniform(0.05,0.5)'}
# params.netParams.stimSourceParams['Input_2'] = {'type': 'VClamp', 'dur':[0,1,1], 'amp':[1,1,1],'gain':1, 'rstim':0, 'tau1':1, 'tau2':1, 'i':1}
# params.netParams.stimSourceParams['Input_3'] = {'type': 'AlphaSynapse', 'onset': 'uniform(1,500)', 'tau': 5, 'gmax': 'post_ynorm', 'e': 0}
# params.netParams.stimSourceParams['Input_4'] = {'type': 'NetStim', 'interval': 'uniform(20,100)', 'number': 1000, 'start': 5, 'noise': 0.1}
#
# # Stimulation mapping parameters
# params.netParams.stimTargetParams['Input1->PYR'] = {
# 'source': 'Input_1',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'pop':'PYR', 'cellList': range(8)}}
# params.netParams.stimTargetParams['Input3->Basket'] = {
# 'source': 'Input_3',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'cellType':'Basket'}}
# params.netParams.stimTargetParams['Input4->PYR3'] = {
# 'source': 'Input_4',
# 'sec':'soma',
# 'loc': 0.5,
# 'weight': '0.1+gauss(0.2,0.05)',
# 'delay': 1,
# 'conds': {'pop':'PYR3', 'cellList': [0,1,2,5,10,14,15]}}
#
# params.netParams.connParams['invalidStimSource1'] = connRule # add dict with params for this pop
# self.paramsMap["stimSource"]["stimSourceTest"].append(params)
# #
# self.paramsMap["stimTarget"]["stimTargetTest"] = []
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'plasticity': { 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 ,'tauhebb': 10}},
# }
#
# params.netParams.stimSourceParams['Input_1'] = {'type': 'IClamp', 'delay': 10, 'dur': 800, 'amp': 'uniform(0.05,0.5)'}
# params.netParams.stimSourceParams['Input_2'] = {'type': 'VClamp', 'dur':[0,1,1], 'amp':[1,1,1],'gain':1, 'rstim':0, 'tau1':1, 'tau2':1, 'i':1}
# params.netParams.stimSourceParams['Input_3'] = {'type': 'AlphaSynapse', 'onset': 'uniform(1,500)', 'tau': 5, 'gmax': 'post_ynorm', 'e': 0}
# params.netParams.stimSourceParams['Input_4'] = {'type': 'NetStim', 'interval': 'uniform(20,100)', 'number': 1000, 'start': 5, 'noise': 0.1}
#
# # Stimulation mapping parameters
# params.netParams.stimTargetParams['Input1->PYR'] = {
# 'source': 'Input_1',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'pop':'PYR', 'cellList': range(8)}}
# params.netParams.stimTargetParams['Input3->Basket'] = {
# 'source': 'Input_3',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'cellType':'Basket'}}
# params.netParams.stimTargetParams['Input4->PYR3'] = {
# 'source': 'Input_4',
# 'sec':'soma',
# 'loc': 0.5,
# 'weight': '0.1+gauss(0.2,0.05)',
# 'delay': 1,
# 'conds': {'pop':'PYR3', 'cellList': [0,1,2,5,10,14,15]}}
#
# params.netParams.connParams['validStimTarget1'] = connRule # add dict with params for this pop
# self.paramsMap["stimTarget"]["stimTargetTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'plasticity': { 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 ,'tauhebb': 10}},
# }
#
# params.netParams.stimSourceParams['Input_1'] = {'type': 'IClamp', 'delay': 10, 'dur': 800, 'amp': 'uniform(0.05,0.5)'}
# params.netParams.stimSourceParams['Input_2'] = {'type': 'VClamp', 'dur':[0,1,1], 'amp':[1,1,1],'gain':1, 'rstim':0, 'tau1':1, 'tau2':1, 'i':1}
# params.netParams.stimSourceParams['Input_3'] = {'type': 'AlphaSynapse', 'onset': 'uniform(1,500)', 'tau': 5, 'gmax': 'post_ynorm', 'e': 0}
# params.netParams.stimSourceParams['Input_4'] = {'type': 'NetStim', 'interval': 'uniform(20,100)', 'number': 1000, 'start': 5, 'noise': 0.1}
#
# # Stimulation mapping parameters
# params.netParams.stimTargetParams['Input1->PYR'] = {
# 'source': 'Input_1',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'pop':'PYR', 'cellList': range(8)}}
# params.netParams.stimTargetParams['Input3->Basket'] = {
# 'source': 'Input_3',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'cellType':'Basket'}}
# params.netParams.stimTargetParams['Input4->PYR3'] = {
# 'source': 'Input_4',
# 'sec':'soma',
# 'loc': 0.5,
# 'weight': '0.1+gauss(0.2,0.05)',
# 'delay': 1,
# 'conds': {'pop':'PYR3', 'cellList': [0,1,2,5,10,14,15]}}
#
# params.netParams.connParams['validStimTarget1'] = connRule # add dict with params for this pop
# self.paramsMap["stimTarget"]["stimTargetTest"].append(params)
#
# params = ParamsObj()
# # Connectivity parameters
# connRule = {
# 'preConds': {'popLabel': 'hop'}, 'postConds': {'popLabel': 'hop'},
# 'weight': 0.0, # weight of each connection
# 'synMech': 'AMPA', # target inh synapse
# 'synsPerConn': 1,
# 'delay': 5,
# 'plasticity': { 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 ,'tauhebb': 10}},
# }
#
# params.netParams.stimSourceParams['Input_1'] = {'type': 'IClamp', 'delay': 10, 'dur': 800, 'amp': 'uniform(0.05,0.5)'}
# params.netParams.stimSourceParams['Input_2'] = {'type': 'VClamp', 'dur':[0,1,1], 'amp':[1,1,1],'gain':1, 'rstim':0, 'tau1':1, 'tau2':1, 'i':1}
# params.netParams.stimSourceParams['Input_3'] = {'type': 'AlphaSynapse', 'onset': 'uniform(1,500)', 'tau': 5, 'gmax': 'post_ynorm', 'e': 0}
# params.netParams.stimSourceParams['Input_4'] = {'type': 'NetStim', 'interval': 'uniform(20,100)', 'number': 1000, 'start': 5, 'noise': 0.1}
#
# # Stimulation mapping parameters
# params.netParams.stimTargetParams['Input1->PYR'] = {
# 'source': 'Input_11',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'pop':'PYR', 'cellList': range(8)}}
# params.netParams.stimTargetParams['Input3->Basket'] = {
# 'source': 'Input_3',
# 'sec':'soma',
# 'loc': 0.5,
# 'conds': {'cellType':'Basket'}}
# params.netParams.stimTargetParams['Input4->PYR3'] = {
# 'source': 'Input_4',
# 'sec':'soma',
# 'loc': 0.5,
# 'weight': '0.1+gauss(0.2,0.05)',
# 'delay': 1,
# 'conds': {'pop':'PYR3', 'cellList': [0,1,2,5,10,14,15]}}
#
# params.netParams.connParams['invalidStimTarget1'] = connRule # add dict with params for this pop
# self.paramsMap["stimTarget"]["stimTargetTest"].append(params)
def runTestsWithParams(self):
self.runPopTestsWithParams()
self.runNetTestsWithParams()
self.runCellTestsWithParams()
self.runConnTestsWithParams()
self.runStimSourceTests()
self.runStimTargetTests()
self.runSimConfigTests()
def runPopTestsWithParams(self):
popParamsMap = self.paramsMap["pop"]
# run the different tests for pop
for testName, paramObjList in list(popParamsMap.items()):
# run the test with different params
for paramsObj in paramObjList:
self.netPyneTestObj.netParams = paramsObj.netParams
self.netPyneTestObj.runTests()
def runNetTestsWithParams(self):
netParamsMap = self.paramsMap["net"]
# run the different tests for net
for testName, paramObjList in list(netParamsMap.items()):
# run the test with different params
for paramsObj in paramObjList:
self.netPyneTestObj.netParams = paramsObj.netParams
self.netPyneTestObj.runTests()
def runCellTestsWithParams(self):
#print ( " run cell tests ")
cellParamsMap = self.paramsMap["cell"]
# run the different tests for cell
for testName, paramObjList in list(cellParamsMap.items()):
for paramsObj in paramObjList:
self.netPyneTestObj.netParams = paramsObj.netParams
self.netPyneTestObj.runTests()
def runConnTestsWithParams(self):
#print ( " #### running conn tests " )
connParamsMap = self.paramsMap["conn"]
#print (" connParamsMap = " + str(connParamsMap))
# run the different tests for conn
for testName, paramObjList in list(connParamsMap.items()):
for paramsObj in paramObjList:
#print ( " calling tests")
self.netPyneTestObj.netParams = paramsObj.netParams
self.netPyneTestObj.runTests()
def runStimSourceTests(self):
#print ( " running conn tests " )
stimSourceParamsMap = self.paramsMap["stimSource"]
        # run the different tests for stimSource
for testName, paramObjList in list(stimSourceParamsMap.items()):
for paramsObj in paramObjList:
#print ( " calling tests")
self.netPyneTestObj.netParams = paramsObj.netParams
self.netPyneTestObj.runTests()
def runStimTargetTests(self):
#print ( " running conn tests " )
stimTargetParamsMap = self.paramsMap["stimTarget"]
        # run the different tests for stimTarget
for testName, paramObjList in list(stimTargetParamsMap.items()):
for paramsObj in paramObjList:
#print ( " calling tests")
self.netPyneTestObj.netParams = paramsObj.netParams
self.netPyneTestObj.runTests()
def runSimConfigTests(self):
#print ( " running conn tests " )
simConfigParamsMap = self.paramsMap["simConfig"]
        # run the different tests for simConfig
for testName, paramObjList in list(simConfigParamsMap.items()):
#print ( " calling tests 00 " + testName)
for paramsObj in paramObjList:
#print ( " calling tests " + testName)
self.netPyneTestObj.netParams = paramsObj.netParams
self.netPyneTestObj.simConfig = paramsObj.simConfig
self.netPyneTestObj.runTests()
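# Sketch of the structure runTestsWithParams expects (labels follow the
# commented-out examples above; only the keys actually dispatched on are shown):
#
#   self.paramsMap = {
#       "pop": {...}, "net": {...}, "cell": {...},
#       "conn": {"connsShapeTest": [<ParamsObj>, ...], ...},
#       "stimSource": {"stimSourceTest": [...]},
#       "stimTarget": {"stimTargetTest": [...]},
#       "simConfig": {...},
#   }
#
# Each ParamsObj carries a netParams (and, for simConfig tests, a simConfig)
# that is handed to netPyneTestObj before runTests() is called.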
runNetPyneTests = RunNetPyneTests()
#runNetPyneTests.runTestsWithParams()
|
Neurosim-lab/netpyne
|
netpyne/tests/validate_tests.py
|
Python
|
mit
| 147,030
|
[
"NEURON"
] |
574439bb0f2fcd159ce0af37701736500f851241759e2e33070a844fc2248916
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import pcraster as pcr
import netCDF4 as nc
import virtualOS as vos
# obtaining system arguments containing: clone_map, input_netcdf_filename, output_pcraster_filename, variable_name, date_yyyy_mm_dd
system_argument = sys.argv
#~ # TODO: help/hint about the system arguments needed to be provided
#~ if sys.argv == "--help":
# set clone map
clone_map_filename = sys.argv[1]
pcr.setclone(clone_map_filename)
# set input_netcdf_filename
input_netcdf_filename = sys.argv[2]
# set output_pcraster_filename
output_pcraster_filename = sys.argv[3]
# set variable_name
variable_name = None
if len(sys.argv) > 4: variable_name = sys.argv[4]
if variable_name is None:
    # open the netcdf file, loop through its variables and pick the first
    # data variable, ignoring coordinate and time variables
    f = nc.Dataset(input_netcdf_filename)
    ignored_variable_names = ['lat', 'lon', 'latitudes', 'longitudes',
                              'latitude', 'longitude', 'time']
    variable_names = [v for v in f.variables.keys() if v not in ignored_variable_names]
    f.close()
    # use the first remaining variable
    variable_name = str(variable_names[0])
msg = 'Converting ' + variable_name + ' from the file: ' + input_netcdf_filename + ' to ' + output_pcraster_filename
print msg
# set date_yyyy_mm_dd
date_yyyy_mm_dd = None
if len(sys.argv) > 5: date_yyyy_mm_dd = sys.argv[5]
# read netcdf file
if date_yyyy_mm_dd is None:
map_value = vos.netcdf2PCRobjCloneWithoutTime(input_netcdf_filename,\
variable_name,\
clone_map_filename)
else:
map_value = vos.netcdf2PCRobjClone(input_netcdf_filename,\
variable_name,\
date_yyyy_mm_dd,\
clone_map_filename)
# save the map as pcraster map
pcr.report(map_value, output_pcraster_filename)
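# Example invocation (paths and names are illustrative only):
#
#   python netcdf_to_pcraster.py clone.map precipitation.nc precipitation.map \
#       precipitation 2000-01-31
#
# The last two arguments are optional: without a variable name the first data
# variable in the file is used, and without a date the file is read as having
# no time dimension.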
|
edwinkost/edwin_simple_tools
|
netcdf_to_pcraster/netcdf_to_pcraster.py
|
Python
|
gpl-2.0
| 2,042
|
[
"NetCDF"
] |
d66a02ac6444469014717aed2084d9fed4c5ee0f24b702742141f31818469776
|
"""
Runs Palmapper on single-end or paired-end data.
"""
import optparse, os, sys, tempfile, shutil, time, re
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
#os.environ['PATH']=os.environ['PATH']+":/home/galaxy/software/samtools.svn"+":/home/galaxy/software/palmapper-trunk"
stime = time.asctime( time.localtime(time.time()) )
print '----------------------------------------------'
print 'PALMapper started on ' + stime
print '----------------------------------------------'
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option('', '--logfile', dest='logfile', help='log file')
#Read files
parser.add_option('', '--paired', dest='paired', help='Whether the data is single- or paired-end')
parser.add_option('', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format')
parser.add_option('', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format')
parser.add_option('', '--strand', dest='strand', help='Strand information (left or right)')
parser.add_option('', '--protocol', dest='protocol', help='Protocol used (first or second)')
#Reference genome and index information
parser.add_option('', '--indexSource', dest='indexSource', default='array', help='The type of index: bwa or array')
parser.add_option('', '--genomeSource', dest='genomeSource', help='The type of reference provided')
parser.add_option('', '--ref', dest='ref', help='The reference genome to use or index')
parser.add_option('', '--indexSettings', dest='index_settings', help='Whether or not indexing options are to be set')
parser.add_option('', '--seedlength', dest='seedlength', help='Index Seed Length')
# Splice site predictions
parser.add_option('', '--ss-pred', dest='ss_pred', help='use splice site predictions')
parser.add_option('', '--acc', dest='acc', help='Acceptor SS predictions')
parser.add_option('', '--don', dest='don', help='Donor SS predictions')
#Output files
parser.add_option('', '--format', dest='format', help='Output format (bedx, sam or bam)')
parser.add_option('', '--unspliced-output', dest='unspliced_output', help='The Bedx output file for unspliced reads')
parser.add_option('', '--spliced-output', dest='spliced_output', help='The Bedx output file for spliced reads')
parser.add_option('', '--sam-output', dest='sam_output', help='The SAM output file for both spliced and unspliced reads')
parser.add_option('', '--bam-output', dest='bam_output', help='The BAM output file for both spliced and unspliced reads')
parser.add_option('', '--bamsort', dest='bamsorting', help='Type of sorting for BAM output (unsorted, position or read)')
parser.add_option('', '--include-unmapped', dest='unmapped_included', help='Whether unmapped reads are included in output file (only for SAM and BAM format)')
parser.add_option('', '--coverage-map', dest='coverage', help='Whether the coverage map should be output')
parser.add_option('', '--junctions', dest='junctions', help='Whether the intron junction library should be built')
parser.add_option('', '--coverage-output', dest='coverage_output', help='Coverage map output')
parser.add_option('', '--junctions-output', dest='junctions_output', help='Intron junctions file')
#GenomeMapper parameters
parser.add_option('', '--params', dest='params', help='Whether to use default or specified parameters for GenomeMapper')
parser.add_option('', '--alignseedlength', dest='alignseedlength', help='Alignment Seed Length')
parser.add_option('', '--maxmismatches', dest='maxmismatches', help='Maximal number of mismatches')
parser.add_option('', '--maxgaps', dest='maxgaps', help='Maximal number of gaps')
parser.add_option('', '--maxedits', dest='maxedits', help='Maximal number of edit operations')
    parser.add_option('', '--seedhitcancel', dest='seedhitcancel', help='Number of hits of a seed above which the seed is ignored')
parser.add_option('', '--threads', dest='threads', help='The number of threads to run')
parser.add_option('', '--topalignment', dest='topalignment', help='Number of top alignments to report')
parser.add_option('', '--reportall', dest='reportall', help='Report all alignments')
#QPALMA parameters
parser.add_option('', '--qpalma', dest='qpalma', help='QPALMA parameter file')
parser.add_option('', '--qpalma-params', dest='qpalma_params', help='Whether to use default or specified parameters for QPALMA')
parser.add_option('', '--mmtrigger', dest='mmtrigger', help='Mismatch threshold to trigger spliced alignments')
parser.add_option('', '--gtrigger', dest='gtrigger', help='Gap threshold to trigger spliced alignments')
parser.add_option('', '--maxalign', dest='maxalign', help='Maximal number of spliced alignments per read')
parser.add_option('', '--aligntrigger', dest='aligntrigger', help='Minimal length of long hit')
parser.add_option('', '--alignshorttrigger', dest='alignshorttrigger', help='Minimal length of short hit')
parser.add_option('', '--aligncombinedtrigger', dest='aligncombinedtrigger', help='Minimal combined length')
parser.add_option('', '--maxintronlength', dest='maxintronlength', help='Maximal intron length')
parser.add_option('', '--maxintronnum', dest='maxintronnum', help='Maximal number of introns')
parser.add_option('', '--qmm', dest='qmm', help='Number of matches required for identifying a splice site')
parser.add_option('', '--clustertol', dest='clustertol', help='Distance in nt to tolerate between hit and existing hit cluster')
parser.add_option('', '--qpalma-use-map-max-len', dest='mapmaxlen', help='Up and downstream limit of map extension')
#parser.add_option('', '--filter_ss_tophit', dest='filter_ss_tophit', help='filter_ss_tophit')
parser.add_option('', '--report_ss', dest='report_ss', help='Splice site-based alignment regions')
parser.add_option('', '--reportmappedread', dest='reportmappedread', help='Use mapped unspliced reads for determining alignment regions')
parser.add_option('', '--reportsplicedread', dest='reportsplicedread', help='Use mapped spliced reads for determining alignment regions')
    parser.add_option('', '--rtrim', dest='rtrim', help='Minimal length of read when trimming the right side')
parser.add_option('', '--rtrim-step', dest='rtrim_step', help='Right trimming step')
parser.add_option('', '--polytrim', dest='polytrim', help='Minimal length of read when trimming polyA or polyT ends')
parser.add_option('', '--junction-remapping', dest='junction_remapping', help='Intron junctions file for remapping strategy (Gff3 format)')
parser.add_option('', '--junction-coverage', dest='junction_coverage', help='Minimal intron junction support for remapping strategy')
parser.add_option('', '--non-consensus-search', dest='non_consensus', help='Whether spliced alignments with non consensus sequences as splice sites are searched')
(options, args) = parser.parse_args()
# index if necessary
if options.genomeSource == 'history':
if options.indexSource == 'array':
# set up commands
if options.index_settings =='index_pre_set':
indexing_cmds = ''
else:
try:
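                # ('', flag)[cond] is an old tuple-indexing idiom used
                # throughout this wrapper: it yields the flag string when
                # cond is truthy and the empty string otherwise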
indexing_cmds = '%s ' % \
(('','-s %s'%options.seedlength)[options.seedlength!='None' and options.seedlength>=1])
except ValueError:
indexing_cmds = ''
# make temp directory for placement of indices and copy reference file there
tmp_dir = tempfile.gettempdir()
try:
os.system('cp %s %s' % (options.ref, tmp_dir))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.ref = os.path.join(tmp_dir, os.path.split(options.ref)[1])
cmd1 = 'pmindex -v -i %s %s' % (options.ref, indexing_cmds)
try:
os.system(cmd1)
except Exception, erf:
stop_err('Error indexing reference sequence\n' + str(erf))
else:
# make temp directory for placement of indices and copy reference file there
tmp_dir = tempfile.gettempdir()
try:
os.system('cp %s %s' % (options.ref, tmp_dir))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.ref = os.path.join(tmp_dir, os.path.split(options.ref)[1])
cmd1 = 'bwa index %s' % (options.ref)
try:
os.system(cmd1)
except Exception, erf:
stop_err('Error indexing reference sequence\n' + str(erf))
#GenomeMapper parameters
if options.params == 'pre_set':
# Auto values for: -M -G -E -z
# Supporting only one thread
aligning_cmds = '-l 18 -seed-hit-cancel-threshold 10000 '
else:
try:
aligning_cmds = '%s %s %s %s %s %s %s ' % \
(('','-l %s' % options.alignseedlength)[options.alignseedlength!='None'],
('','-M %s' % options.maxmismatches)[options.maxmismatches!='None'],
('','-G %s' % options.maxgaps)[options.maxgaps!='None'],
('','-E %s' % options.maxedits)[options.maxedits!='None'],
('','-seed-hit-cancel-threshold %s' % options.seedhitcancel)[options.seedhitcancel!='None'],
#('','-threads %s' % options.threads)[options.threads!='None'],
('','-z %s' % options.topalignment)[options.topalignment!='None'],
('','-a')[options.reportall!='false'])
except ValueError, erf:
stop_err('Something is wrong with the alignment parameters and the alignment could not be run\n' + str(erf))
#Index type
aligning_cmds+=('','-bwa 12 ')[options.indexSource=="bwa"]
#QPALMA parameters
if options.qpalma_params == 'pre_set':
# Auto values: -L -K -C -I -NI -QMM
qpalma_cmds = '-filter-max-mismatches 1 -filter-max-gaps 0 -SA 10 -CT 10 -qpalma-use-map-max-len 5000 -report-splice-sites 0.9 -report-map-read -report-spliced-read -report-map-region -S '
else:
try:
#print options
qpalma_cmds = '%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s -S ' % \
(('','-filter-max-mismatches %s' % options.mmtrigger)[options.mmtrigger!='None'],
('','-filter-max-gaps %s' % options.gtrigger)[options.gtrigger!='None'],
('','-SA %s' % options.maxalign)[options.maxalign!='None'],
('','-L %s' % options.aligntrigger)[options.aligntrigger!='None'],
('','-K %s' % options.alignshorttrigger)[options.alignshorttrigger!='None'],
('','-C %s' % options.aligncombinedtrigger)[options.aligncombinedtrigger!='None'],
('','-I %s' % options.maxintronlength)[options.maxintronlength!='None'],
('','-NI %s' % options.maxintronnum)[options.maxintronnum!='None'],
('','-QMM %s' % options.qmm)[options.qmm!='None'],
('','-CT %s' % options.clustertol)[options.clustertol!='None'],
('','-qpalma-use-map-max-len %s' % options.mapmaxlen)[options.mapmaxlen!='None'],
('','-report-splice-sites %s' % options.report_ss)[options.report_ss!='None'],
('','-rtrim %s ' % options.rtrim)[options.rtrim!='None'],
('','-rtrim-step %s ' % options.rtrim_step)[options.rtrim!='None'],
('','-polytrim %s ' % options.polytrim)[options.polytrim!='None'],
('','-junction-remapping %s ' % options.junction_remapping)[options.junction_remapping != 'None'],
('','-junction-remapping-coverage %s ' % options.junction_coverage)[options.junction_remapping != 'None'])
qpalma_cmds +=('','-report-spliced-read ')[options.reportsplicedread=='true']
qpalma_cmds +=('','-report-map-read ')[options.reportmappedread=='true']
qpalma_cmds +=('','-non-consensus-search ')[options.non_consensus=='true']
except ValueError, erf:
stop_err('Something is wrong with the QPALMA alignment parameters and the alignment could not be run\n' + str(erf))
# creating copy of critical files on local tmp file system
# Reference genome
index_tmp_dname = tempfile.mkdtemp(suffix='', prefix='gmindex_tmp_', dir=None) ;
if options.ref[0:5]!='/tmp/':
try:
os.system('cp %s* %s' % (options.ref, index_tmp_dname))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.ref = os.path.join(index_tmp_dname, os.path.split(options.ref)[1])
#Splice site predictions
if (options.ss_pred == "true"):
acc_tmp_dname = tempfile.mkdtemp(suffix='', prefix='acc_', dir=None) ;
if os.path.isdir(os.path.join(options.acc,'pred')):
try:
os.system('cp %s/pred/contig_* %s' % (options.acc, acc_tmp_dname))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
else:
try:
os.system('cp %s/contig_* %s' % (options.acc, acc_tmp_dname))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.acc = os.path.join(acc_tmp_dname, 'contig_%i%c')
don_tmp_dname = tempfile.mkdtemp(suffix='', prefix='don_', dir=None) ;
if os.path.isdir(os.path.join(options.don,'pred')):
try:
os.system('cp %s/pred/contig_* %s' % (options.don, don_tmp_dname))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
else:
try:
os.system('cp %s/contig_* %s' % (options.don, don_tmp_dname))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.don = os.path.join(don_tmp_dname, 'contig_%i%c')
ss_cmds = '-acc %s -don %s ' % (options.acc, options.don)
else:
ss_cmds = '-no-ss-pred '
# prepare actual aligning commands
(report_file, report_fname) = tempfile.mkstemp(suffix='', prefix='report_', dir=None)
os.close(report_file)
try:
os.unlink(report_fname)
except:
pass
## Output files
if options.format == 'sam':
output_cmd='-f sam -o %s ' % options.sam_output
if options.unmapped_included == 'true':
output_cmd+='-include-unmapped-reads '
else:
(unmapped_tmp_file, unmapped_tmp_fname) = tempfile.mkstemp(suffix='', prefix='unmapped_', dir=None) ;
os.close(unmapped_tmp_file) ;
            output_cmd+='-u %s ' % unmapped_tmp_fname
elif options.format == 'bam':
if options.bamsorting == "position":
output_cmd='-f bamp '
elif options.bamsorting == "read":
output_cmd='-f bamn '
else:
output_cmd='-f bam '
output_cmd+='-o %s ' % options.bam_output
if options.unmapped_included == 'true':
output_cmd+='-include-unmapped-reads '
else:
(unmapped_tmp_file, unmapped_tmp_fname) = tempfile.mkstemp(suffix='', prefix='unmapped_', dir=None) ;
os.close(unmapped_tmp_file) ;
            output_cmd+='-u %s ' % unmapped_tmp_fname
else: #bedx output
(unmapped_tmp_file, unmapped_tmp_fname) = tempfile.mkstemp(suffix='', prefix='unmapped_', dir=None) ;
os.close(unmapped_tmp_file) ;
        output_cmd='-f bedx -o %s -H %s -u %s ' % (options.unspliced_output,options.spliced_output, unmapped_tmp_fname)
if options.coverage == 'true':
output_cmd+='-report-coverage-wig %s ' % options.coverage_output
if options.junctions == 'true':
output_cmd+='-report-junctions %s ' % options.junctions_output
## Input files
if options.paired == 'paired':
input_cmd='-q1 %s -q2 %s ' % (options.input1, options.input2)
if options.protocol != 'unstranded':
input_cmd+='-protocol %s ' % options.protocol
else:
assert( options.paired == 'single' )
input_cmd='-q %s ' % options.input1
if options.strand != 'unstranded':
input_cmd+='-strand %s ' % options.strand
if options.protocol != 'unstranded':
input_cmd+='-protocol %s ' % options.protocol
cmd2a = 'palmapper %s %s -i %s %s %s -qpalma %s %s -report %s -threads 2 -qpalma-prb-offset-fix >> %s' % (aligning_cmds, qpalma_cmds, options.ref, input_cmd, output_cmd, options.qpalma, ss_cmds, report_fname, options.logfile)
# align
try:
#os.environ['LD_LIBRARY_PATH']='/home/galaxy/svn/projects/QPalma/dyn_prog/cpplib/:/home/galaxy/software/shogun/lib/'
# print re.sub(r'palmapper', r'GALAXY-SOFTWARE-DIR', cmd2a)
#print re.sub(r'/home/galaxy/software/palmapper-0.4/palmapper', r'GALAXY-SOFTWARE-DIR', cmd2a)
print cmd2a
os.system(cmd2a)
except Exception, erf:
stop_err("Error aligning sequence\n" + str(erf))
try:
shutil.rmtree(index_tmp_dname)
shutil.rmtree(acc_tmp_dname)
shutil.rmtree(don_tmp_dname)
os.unlink(report_fname)
except:
pass
etime = time.asctime( time.localtime(time.time()) )
print '----------------------------------------------'
print 'PALMapper finished on ' + etime
print '----------------------------------------------'
if __name__=="__main__": __main__()
|
ratschlab/palmapper
|
galaxy/palmapper_wrapper.py
|
Python
|
gpl-3.0
| 18,261
|
[
"BWA",
"Galaxy"
] |
67f7b87a66ad4badfe1ad7b97995587cfa96105f67aee1216bb26cc97d04fe5a
|
from pyjade import Compiler as _Compiler
from pyjade.runtime import attrs, escape, iteration
import tornado.template
from pyjade.utils import process
from pyjade.exceptions import CurrentlyNotSupported
ATTRS_FUNC = '__pyjade_attrs'
ESCAPE_FUNC = '__pyjade_escape'
ITER_FUNC = '__pyjade_iter'
class Compiler(_Compiler):
def visitCodeBlock(self,block):
self.buffer('{%% block %s %%}'%block.name)
if block.mode=='append': self.buffer('{% raw super() %}')
self.visitBlock(block)
if block.mode=='prepend': self.buffer('{% raw super() %}')
self.buffer('{% end %}')
# def visitMixin(self,mixin):
# if mixin.block:
# self.buffer('{%% macro %s(%s) %%}'%(mixin.name,mixin.args))
# self.visitBlock(mixin.block)
# self.buffer('{% end %}')
# else:
# self.buffer('{%% raw %s(%s)} %%}'%(mixin.name,mixin.args))
def interpolate(self, text, escape=True):
return self._interpolate(text,lambda x:'{%% raw %s(%s) %%}' % (ESCAPE_FUNC, x))
def visitMixin(self,mixin):
raise CurrentlyNotSupported('mixin')
def visitAssignment(self,assignment):
self.buffer('{%% set %s = %s %%}'%(assignment.name,assignment.val))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
val = self.var_processor(val)
if code.escape:
self.buf.append('{%% raw %s(%s) %%}' % (ESCAPE_FUNC, val))
else:
self.buf.append('{%% raw %s %%}' % val)
else:
self.buf.append('{%% %s %%}'%code.val)
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ',1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}'%codeTag)
def visitEach(self,each):
self.buf.append('{%% for %s in %s(%s,%s) %%}'%(','.join(each.keys),ITER_FUNC,each.obj,len(each.keys)))
self.visit(each.block)
self.buf.append('{% end %}')
def visitConditional(self,conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append('{%% %s %%}'%TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']: self.buf.append('{% end %}')
def attributes(self,attrs):
return "{%% raw %s(%s) %%}" % (ATTRS_FUNC, attrs)
class Template(tornado.template.Template):
def __init__(self, template_string, name="<string>", *args,**kwargs):
is_jade = name.endswith(".jade")
if is_jade:
template_string = process(template_string,filename=name,compiler=Compiler)
super(Template, self).__init__(template_string, name, *args,**kwargs)
if is_jade:
self.namespace.update(
{ATTRS_FUNC:attrs,
ESCAPE_FUNC:escape,
ITER_FUNC:iteration}
)
# Patch the tornado template engine to preprocess jade templates
def patch_tornado():
tornado.template.Template = Template
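# Minimal usage sketch (the file name and template variable below are
# illustrative, not part of this module):
#
#   from pyjade.ext.tornado import patch_tornado
#   patch_tornado()
#
#   import tornado.template
#   loader = tornado.template.Loader(".")
#   html = loader.load("page.jade").generate(items=[1, 2, 3])
#
# Only templates whose names end in ".jade" are preprocessed; any other
# template is passed to tornado's Template unchanged.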
|
syrusakbary/pyjade
|
pyjade/ext/tornado/__init__.py
|
Python
|
mit
| 3,489
|
[
"VisIt"
] |
e65edff989f6eb71e94dc050da778a0fe249c8bde528e98af59436f6fc35e239
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-service-ports
# Author : Stuart Paterson
########################################################################
"""
Print the service ports for the specified setup
Example:
$ dirac-admin-service-ports
{'Framework/ProxyManager': 9152,
'Framework/SystemAdministrator': 9162,
'Framework/UserProfileManager': 9155,
'WorkloadManagement/JobManager': 9132,
'WorkloadManagement/PilotManager': 9171,
'WorkloadManagement/Matcher': 9170,
'WorkloadManagement/SandboxStore': 9196,
'WorkloadManagement/WMSAdministrator': 9145}
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument("Setup: Name of the setup", default="", mandatory=False)
Script.parseCommandLine(ignoreErrors=True)
setup = Script.getPositionalArgs(group=True)
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
result = diracAdmin.getServicePorts(setup, printOutput=True)
if result["OK"]:
DIRAC.exit(0)
else:
print(result["Message"])
DIRAC.exit(2)
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_service_ports.py
|
Python
|
gpl-3.0
| 1,318
|
[
"DIRAC"
] |
ed3c4b5437db3d8792c608d2933365362a2a0cd5334a3760177242728099d4f1
|
from __future__ import print_function, absolute_import
import math
from numba import cuda, float32, float64, uint32, int64, uint64, from_dtype,\
jit
import numpy as np
# This implementation is based upon the xoroshiro128+ and splitmix64 algorithms
# described at:
#
# http://xoroshiro.di.unimi.it/
#
# and originally implemented by David Blackman and Sebastiano Vigna.
#
# The implementations below are based on the C source code:
#
# * http://xoroshiro.di.unimi.it/xoroshiro128plus.c
# * http://xoroshiro.di.unimi.it/splitmix64.c
#
# Splitmix64 is used to generate the initial state of the xoroshiro128+
# generator to ensure that small seeds don't result in predictable output.
# **WARNING**: There is a lot of verbose casting in this file to ensure that
# NumPy casting conventions (which cast uint64 [op] int32 to float64) don't
# turn integers into floats when using these functions in the CUDA simulator.
#
# There are also no function type signatures to ensure that compilation is
# deferred so that import is quick, and Sphinx autodoc works. We are also
# using the CPU @jit decorator everywhere to create functions that work as
# both CPU and CUDA device functions.
xoroshiro128p_dtype = np.dtype([('s0', np.uint64), ('s1', np.uint64)], align=True)
xoroshiro128p_type = from_dtype(xoroshiro128p_dtype)
@jit
def init_xoroshiro128p_state(states, index, seed):
'''Use SplitMix64 to generate an xoroshiro128p state from 64-bit seed.
This ensures that manually set small seeds don't result in a predictable
initial sequence from the random number generator.
:type states: 1D array, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type index: uint64
:param index: offset in states to update
:type seed: int64
:param seed: seed value to use when initializing state
'''
index = int64(index)
seed = uint64(seed)
z = seed + uint64(0x9E3779B97F4A7C15)
z = (z ^ (z >> uint32(30))) * uint64(0xBF58476D1CE4E5B9)
z = (z ^ (z >> uint32(27))) * uint64(0x94D049BB133111EB)
z = z ^ (z >> uint32(31))
states[index]['s0'] = z
states[index]['s1'] = z
@jit
def rotl(x, k):
'''Left rotate x by k bits.'''
x = uint64(x)
k = uint32(k)
return (x << k) | (x >> uint32(64 - k))
@jit
def xoroshiro128p_next(states, index):
'''Return the next random uint64 and advance the RNG in states[index].
:type states: 1D array, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type index: int64
:param index: offset in states to update
:rtype: uint64
'''
index = int64(index)
s0 = states[index]['s0']
s1 = states[index]['s1']
result = s0 + s1
s1 ^= s0
states[index]['s0'] = uint64(rotl(s0, uint32(55))) ^ s1 ^ (s1 << uint32(14))
states[index]['s1'] = uint64(rotl(s1, uint32(36)))
return result
XOROSHIRO128P_JUMP = (uint64(0xbeac0467eba5facb), uint64(0xd86b048b86aa9922))
@jit
def xoroshiro128p_jump(states, index):
'''Advance the RNG in ``states[index]`` by 2**64 steps.
:type states: 1D array, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type index: int64
:param index: offset in states to update
'''
index = int64(index)
s0 = uint64(0)
s1 = uint64(0)
for i in range(2):
for b in range(64):
if XOROSHIRO128P_JUMP[i] & (uint64(1) << uint32(b)):
s0 ^= states[index]['s0']
s1 ^= states[index]['s1']
xoroshiro128p_next(states, index)
states[index]['s0'] = s0
states[index]['s1'] = s1
@jit
def uint64_to_unit_float64(x):
'''Convert uint64 to float64 value in the range [0.0, 1.0)'''
x = uint64(x)
return (x >> uint32(11)) * (float64(1) / (uint64(1) << uint32(53)))
@jit
def uint64_to_unit_float32(x):
'''Convert uint64 to float32 value in the range [0.0, 1.0)'''
x = uint64(x)
return float32(uint64_to_unit_float64(x))
@jit
def xoroshiro128p_uniform_float32(states, index):
'''Return a float32 in range [0.0, 1.0) and advance ``states[index]``.
:type states: 1D array, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type index: int64
:param index: offset in states to update
:rtype: float32
'''
index = int64(index)
return uint64_to_unit_float32(xoroshiro128p_next(states, index))
@jit
def xoroshiro128p_uniform_float64(states, index):
'''Return a float64 in range [0.0, 1.0) and advance ``states[index]``.
:type states: 1D array, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type index: int64
:param index: offset in states to update
:rtype: float64
'''
index = int64(index)
return uint64_to_unit_float64(xoroshiro128p_next(states, index))
TWO_PI_FLOAT32 = np.float32(2 * math.pi)
TWO_PI_FLOAT64 = np.float64(2 * math.pi)
@jit
def xoroshiro128p_normal_float32(states, index):
'''Return a normally distributed float32 and advance ``states[index]``.
The return value is drawn from a Gaussian of mean=0 and sigma=1 using the
Box-Muller transform. This advances the RNG sequence by two steps.
:type states: 1D array, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type index: int64
:param index: offset in states to update
:rtype: float32
'''
index = int64(index)
u1 = xoroshiro128p_uniform_float32(states, index)
u2 = xoroshiro128p_uniform_float32(states, index)
z0 = math.sqrt(-float32(2.0) * math.log(u1)) * math.cos(TWO_PI_FLOAT32 * u2)
# discarding second normal value
# z1 = math.sqrt(-float32(2.0) * math.log(u1)) * math.sin(TWO_PI_FLOAT32 * u2)
return z0
@jit
def xoroshiro128p_normal_float64(states, index):
    '''Return a normally distributed float64 and advance ``states[index]``.
The return value is drawn from a Gaussian of mean=0 and sigma=1 using the
Box-Muller transform. This advances the RNG sequence by two steps.
:type states: 1D array, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type index: int64
:param index: offset in states to update
:rtype: float64
'''
index = int64(index)
u1 = xoroshiro128p_uniform_float32(states, index)
u2 = xoroshiro128p_uniform_float32(states, index)
z0 = math.sqrt(-float64(2.0) * math.log(u1)) * math.cos(TWO_PI_FLOAT64 * u2)
# discarding second normal value
# z1 = math.sqrt(-float64(2.0) * math.log(u1)) * math.sin(TWO_PI_FLOAT64 * u2)
return z0
@jit
def init_xoroshiro128p_states_cpu(states, seed, subsequence_start):
n = states.shape[0]
seed = uint64(seed)
subsequence_start = uint64(subsequence_start)
if n >= 1:
init_xoroshiro128p_state(states, 0, seed)
# advance to starting subsequence number
for _ in range(subsequence_start):
xoroshiro128p_jump(states, 0)
# populate the rest of the array
for i in range(1, n):
states[i] = states[i - 1] # take state of previous generator
xoroshiro128p_jump(states, i) # and jump forward 2**64 steps
def init_xoroshiro128p_states(states, seed, subsequence_start=0, stream=0):
'''Initialize RNG states on the GPU for parallel generators.
    This initializes the RNG states so that each state in the array
    corresponds to a subsequence of the main sequence, separated from its
    neighbors by 2**64 steps. Therefore, as long as no CUDA thread requests
    more than 2**64 random numbers, all of the RNG states produced by this
    function are guaranteed to be independent.
The subsequence_start parameter can be used to advance the first RNG state
by a multiple of 2**64 steps.
:type states: 1D DeviceNDArray, dtype=xoroshiro128p_dtype
:param states: array of RNG states
:type seed: uint64
:param seed: starting seed for list of generators
'''
# Initialization on CPU is much faster than the GPU
states_cpu = np.empty(shape=states.shape, dtype=xoroshiro128p_dtype)
init_xoroshiro128p_states_cpu(states_cpu, seed, subsequence_start)
states.copy_to_device(states_cpu, stream=stream)
def create_xoroshiro128p_states(n, seed, subsequence_start=0, stream=0):
'''Returns a new device array initialized for n random number generators.
    This initializes the RNG states so that each state in the array
    corresponds to a subsequence of the main sequence, separated from its
    neighbors by 2**64 steps. Therefore, as long as no CUDA thread requests
    more than 2**64 random numbers, all of the RNG states produced by this
    function are guaranteed to be independent.
The subsequence_start parameter can be used to advance the first RNG state
by a multiple of 2**64 steps.
:type n: int
:param n: number of RNG states to create
:type seed: uint64
:param seed: starting seed for list of generators
:type subsequence_start: uint64
:param subsequence_start:
:type stream: CUDA stream
:param stream: stream to run initialization kernel on
'''
states = cuda.device_array(n, dtype=xoroshiro128p_dtype, stream=stream)
init_xoroshiro128p_states(states, seed, subsequence_start, stream)
return states
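# Example kernel using this module (the Monte Carlo pi estimate from the
# numba documentation; grid dimensions are arbitrary):
#
#   import numpy as np
#   from numba import cuda
#   from numba.cuda.random import (create_xoroshiro128p_states,
#                                  xoroshiro128p_uniform_float32)
#
#   @cuda.jit
#   def compute_pi(rng_states, iterations, out):
#       thread_id = cuda.grid(1)
#       inside = 0
#       for i in range(iterations):
#           x = xoroshiro128p_uniform_float32(rng_states, thread_id)
#           y = xoroshiro128p_uniform_float32(rng_states, thread_id)
#           if x ** 2 + y ** 2 <= 1.0:
#               inside += 1
#       out[thread_id] = 4.0 * inside / iterations
#
#   threads_per_block, blocks = 64, 24
#   rng_states = create_xoroshiro128p_states(threads_per_block * blocks, seed=1)
#   out = np.zeros(threads_per_block * blocks, dtype=np.float32)
#   compute_pi[blocks, threads_per_block](rng_states, 10000, out)
#   print('pi: %f' % out.mean())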
|
jriehl/numba
|
numba/cuda/random.py
|
Python
|
bsd-2-clause
| 9,232
|
[
"Gaussian"
] |
55d024248cfa14d5b2fef812e2efd0488667e36a3d397dae4bccfed8b73de121
|
import os
import glob
import sys
import shutil
import pysam
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists)
from bcbio.provenance import do
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio import bam
from bcbio import broad
from bcbio.wgbsseq import kits
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
assert data["analysis"].lower().startswith("wgbs-seq"), "No comparible alignment."
config = data["config"]
sample = dd.get_sample_name(data)
out_prefix = os.path.join(align_dir, dd.get_lane(data))
out_dir = os.path.join(align_dir, "%s_bismark" % dd.get_lane(data))
if not ref_file:
logger.error("bismark index not found. You can install "
"the index for your genome with: bcbio_nextgen.py upgrade "
"--aligners bismark --genomes genome-build-name --data")
sys.exit(1)
final_out = os.path.join(align_dir, "{0}.bam".format(sample))
if file_exists(final_out):
data = dd.set_work_bam(data, final_out)
data["bam_report"] = glob.glob(os.path.join(out_dir, "*report.txt"))[0]
data = dd.update_summary_qc(data, "bismark", base=data["bam_report"])
return data
bismark = config_utils.get_program("bismark", config)
# bismark uses 5 threads/sample and ~12GB RAM/sample (hg38)
resources = config_utils.get_resources("bismark", data["config"])
max_cores = dd.get_num_cores(data)
max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) / (1024.0 * 1024.0)
instances = calculate_bismark_instances(max_cores, max_mem * max_cores)
# override instances if specified in the config
if resources and resources.get("bismark_threads"):
instances = resources.get("bismark_threads")
logger.info(f"Using {instances} bismark instances - overriden by resources")
bowtie_threads = 2
if resources and resources.get("bowtie_threads"):
bowtie_threads = resources.get("bowtie_threads")
logger.info(f"Using {bowtie_threads} bowtie threads per bismark instance")
kit = kits.KITS.get(dd.get_kit(data), None)
directional = "--non_directional" if kit and not kit.is_directional else ""
other_opts = ""
if resources and resources.get("options", []):
other_opts = resources.get("options", [])
if "--non_directional" in other_opts:
if directional != "--non_directional":
directional = "--non_directional"
logger.info(f"Directional setting in the kit != setting in the yaml/resources, using {directional}")
other_opts.remove("--non_directional")
other_opts = " ".join([str(x) for x in other_opts]).strip()
fastq_files = " ".join([fastq_file, pair_file]) if pair_file else fastq_file
safe_makedir(align_dir)
cmd = "{bismark} {other_opts} {directional} --bowtie2 --temp_dir {tx_out_dir} --gzip --parallel {instances} -p {bowtie_threads} -o {tx_out_dir} --unmapped {ref_file} {fastq_file} "
if pair_file:
fastq_file = "-1 %s -2 %s" % (fastq_file, pair_file)
raw_bam = glob.glob(out_dir + "/*bismark*bt2*bam")
if not raw_bam:
with tx_tmpdir() as tx_out_dir:
run_message = "Running Bismark aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
shutil.move(tx_out_dir, out_dir)
raw_bam = glob.glob(out_dir + "/*bismark*bt2*bam")
# don't process bam in the bismark pipeline!
utils.symlink_plus(raw_bam[0], final_out)
data = dd.set_work_bam(data, final_out)
data["bam_report"] = glob.glob(os.path.join(out_dir, "*report.txt"))[0]
data = dd.update_summary_qc(data, "bismark", base=data["bam_report"])
return data
def _process_bam(bam_file, in_fastq, sample, reference, config):
broad_runner = broad.runner_from_config(config)
names = {'rg': in_fastq, 'library': 'WGBS_LIB', 'pl': 'Illumina', 'pu': 'R1', 'sm': in_fastq, 'sample': sample}
out_fix_bam = broad_runner.run_fn("picard_fix_rgs", bam_file, names)
order_bam = utils.append_stem(out_fix_bam, "_order")
broad_runner.run_fn("picard_reorder", out_fix_bam, reference, order_bam)
bam.index(order_bam, config)
# order_bam = _set_quality(order_bam)
# bam.index(order_bam, config)
return order_bam
def remap_index_fn(ref_file):
"""Map sequence references to equivalent bismark indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "bismark")
def _set_quality(in_bam):
"""
change all quality to 255
"""
bam = pysam.AlignmentFile(in_bam, "rb")
out_file = utils.append_stem(in_bam, "_normqual")
if file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out:
with pysam.AlignmentFile(tx_out, "wb", template=bam) as out_handle:
for read in bam.fetch():
read.mapping_quality = 255
out_handle.write(read)
return out_file
def index(ref_file, out_dir, data):
"""Create a bismark index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data))
bismark = config_utils.find_program("bismark", data["config"])
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a bismark index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
other_opts = config_utils.get_resources("bismark", data["config"]).get("options", [])
other_opts = " ".join([str(x) for x in other_opts]).strip()
cmd = "{bismark} {other_opts} --bowtie2 -p {num_cores} -n 1 -o {tx_out_dir} --basename {sample} --unmapped {ref_file} {in_fastq}"
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
def calculate_bismark_instances(cores, memory):
"""
    calculate the number of parallel bismark instances to run, based on the discussion here
https://github.com/FelixKrueger/Bismark/issues/96
cores and memory here are the maximum amounts available for us to use
"""
BISMARK_CORES = 1
BOWTIE_CORES_PER_INSTANCE = 2
SAMTOOLS_CORES_PER_INSTANCE = 1
CORES_PER_INSTANCE = BOWTIE_CORES_PER_INSTANCE + SAMTOOLS_CORES_PER_INSTANCE
GENOME_MEMORY_GB = 12
INSTANCE_MEMORY_GB = 10
available_instance_memory = memory - GENOME_MEMORY_GB
instances_in_memory = max(available_instance_memory / INSTANCE_MEMORY_GB, 1)
available_instance_cores = cores - BISMARK_CORES
instances_in_cores = max(available_instance_cores / CORES_PER_INSTANCE, 1)
instances = int(min(instances_in_memory, instances_in_cores))
logger.info(f"{cores} cores and {memory} memory are available. Spinning up {instances} instances of bismark.")
return instances
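# Worked example (numbers are illustrative): with cores=16 and memory=64 (GB),
# memory allows (64 - 12) / 10 = 5.2 instances and cores allow
# (16 - 1) / 3 = 5.0 instances, so int(min(5.2, 5.0)) = 5 bismark instances
# are spun up.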
|
lbeltrame/bcbio-nextgen
|
bcbio/ngsalign/bismark.py
|
Python
|
mit
| 7,269
|
[
"Bowtie",
"pysam"
] |
1fef7324dac8fca2f0ec0c5db4eabc0eb0719439202a065064bb5ae3dd6e3088
|
import threading
import glob
import gzip
try:
from StringIO import StringIO # Python 2.7
except:
from io import StringIO # Python 3.3+
import uuid
import json
import base64
import re
import os
import sys
import pandas as pd
from prettytable import PrettyTable
import pybars
from .queries import mysql as mysql_templates
from .queries import postgres as postgres_templates
from .queries import sqlite as sqlite_templates
from .queries import mssql as mssql_templates
queries_templates = {
"mysql": mysql_templates,
"postgres": postgres_templates,
"redshift": postgres_templates,
"sqlite": sqlite_templates,
"mssql": mssql_templates,
}
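# Each template module is expected to expose nested dicts keyed the way they
# are used below, roughly (illustrative sqlite-flavored entries, not the
# actual file contents):
#
#   column = {"head": "select {column} from {table} limit {n};", ...}
#   table = {"select": "select {columns} from {table};", ...}
#   system = {"foreign_keys_for_table": ..., "ref_keys_for_table": ...}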
# attempt to import the relevant database libraries
# TODO: maybe add warnings?
try:
import psycopg2 as pg
HAS_PG = True
except ImportError:
HAS_PG = False
try:
import MySQLdb
mysql_connect = MySQLdb.connect
HAS_MYSQL = True
except ImportError:
try:
import pymysql
mysql_connect = pymysql.connect
HAS_MYSQL = True
except ImportError:
HAS_MYSQL = False
try:
import sqlite3 as sqlite
HAS_SQLITE = True
except ImportError:
HAS_SQLITE = False
try:
import pyodbc as pyo
HAS_ODBC = True
except ImportError:
try:
import pypyodbc as pyo
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
try:
import pymssql
HAS_PYMSSQL = True
except ImportError:
HAS_PYMSSQL = False
class Column(object):
"""
    A Column is an in-memory reference to a column in a particular table. You
can use it to do some basic DB exploration and you can also use it to
execute simple queries.
"""
def __init__(self, con, query_templates, table, name, dtype, keys_per_column):
self._con = con
self._query_templates = query_templates
self.table = table
self.name = name
self.type = dtype
self.keys_per_column = keys_per_column
self.foreign_keys = []
self.ref_keys = []
def __repr__(self):
tbl = PrettyTable(["Table", "Name", "Type", "Foreign Keys",
"Reference Keys"])
tbl.add_row([self.table, self.name, self.type, self._str_foreign_keys(),
self._str_ref_keys()])
return str(tbl)
def __str__(self):
return "Column({0})<{1}>".format(self.name, self.__hash__())
def _repr_html_(self):
tbl = PrettyTable(["Table", "Name", "Type"])
tbl.add_row([self.table, self.name, self.type])
return tbl.get_html_string()
def _str_foreign_keys(self):
keys = []
for col in self.foreign_keys:
keys.append("%s.%s" % (col.table, col.name))
if self.keys_per_column is not None and len(keys) > self.keys_per_column:
keys = keys[0:self.keys_per_column] + ['(+ {0} more)'.format(len(keys)-self.keys_per_column)]
return ", ".join(keys)
def _str_ref_keys(self):
keys = []
for col in self.ref_keys:
keys.append("%s.%s" % (col.table, col.name))
if self.keys_per_column is not None and len(keys) > self.keys_per_column:
keys = keys[0:self.keys_per_column] + ['(+ {0} more)'.format(len(keys)-self.keys_per_column)]
return ", ".join(keys)
def head(self, n=6):
"""
        Returns the first n values of the column as a Series. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
LIMIT <n>
Parameters
----------
n: int
number of rows to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.City.head()
0 Sao Jose dos Campos
1 Stuttgart
2 Montreal
3 Oslo
4 Prague
5 Prague
Name: City, dtype: object
>>> db.tables.Customer.City.head(2)
0 Sao Jose dos Campos
1 Stuttgart
Name: City, dtype: object
"""
q = self._query_templates['column']['head'].format(column=self.name, table=self.table, n=n)
return pd.io.sql.read_sql(q, self._con)[self.name]
def all(self):
"""
        Returns the entire column as a Series. This is executing:
SELECT
DISTINCT
<name_of_the_column>
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.Email.all()
0 luisg@embraer.com.br
1 leonekohler@surfeu.de
2 ftremblay@gmail.com
3 bjorn.hansen@yahoo.no
4 frantisekw@jetbrains.com
5 hholy@gmail.com
6 astrid.gruber@apple.at
7 daan_peeters@apple.be
8 kara.nielsen@jubii.dk
9 eduardo@woodstock.com.br
10 alero@uol.com.br
11 roberto.almeida@riotur.gov.br
...
>>> df = db.tables.Customer.Email.all()
>>> len(df)
59
"""
q = self._query_templates['column']['all'].format(column=self.name, table=self.table)
return pd.io.sql.read_sql(q, self._con)[self.name]
def unique(self):
"""
        Returns all unique values of the column as a Series. This is executing:
SELECT
DISTINCT
<name_of_the_column>
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.FirstName.unique()
0 Luis
1 Leonie
2 Francois
3 Bjorn
4 Frantisek
5 Helena
6 Astrid
7 Daan
8 Kara
9 Eduardo
10 Alexandre
...
>>> len(db.tables.Customer.LastName.unique())
"""
q = self._query_templates['column']['unique'].format(column=self.name, table=self.table)
return pd.io.sql.read_sql(q, self._con)[self.name]
def sample(self, n=10):
"""
        Returns a random sample of n values from the column as a Series. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
ORDER BY
RANDOM()
LIMIT <n>
Parameters
----------
n: int
number of rows to sample
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Artist.Name.sample(10)
0 Julian Bream
1 Godsmack
2 Lost
3 Fretwork
4 Pedro Luis E A Parede
5 Philip Glass Ensemble
6 Marvin Gaye
7 Metallica
8 Alanis Morissette
9 Santana Feat. The Project G&B
Name: Name, dtype: object
"""
q = self._query_templates['column']['sample'].format(column=self.name, table=self.table, n=n)
return pd.io.sql.read_sql(q, self._con)[self.name]
class Table(object):
"""
A Table is an in-memory reference to a table in a database. You can use it to get more info
about the columns, schema, etc. of a table and you can also use it to execute queries.
"""
def __init__(self, con, query_templates, name, cols, keys_per_column):
self.name = name
self._con = con
self._cur = con.cursor()
self._query_templates = query_templates
self.foreign_keys = []
self.ref_keys = []
self.keys_per_column = keys_per_column
self._columns = cols
for col in cols:
attr = col.name
if attr in ("name", "con"):
attr = "_" + col.name
setattr(self, attr, col)
self._cur.execute(self._query_templates['system']['foreign_keys_for_table'].format(table=self.name))
for (column_name, foreign_table, foreign_column) in self._cur:
col = getattr(self, column_name)
foreign_key = Column(con, queries_templates, foreign_table, foreign_column, col.type, self.keys_per_column)
self.foreign_keys.append(foreign_key)
col.foreign_keys.append(foreign_key)
setattr(self, column_name, col)
self.foreign_keys = ColumnSet(self.foreign_keys)
self._cur.execute(self._query_templates['system']['ref_keys_for_table'].format(table=self.name))
for (column_name, ref_table, ref_column) in self._cur:
col = getattr(self, column_name)
ref_key = Column(con, queries_templates, ref_table, ref_column, col.type, self.keys_per_column)
self.ref_keys.append(ref_key)
col.ref_keys.append(ref_key)
setattr(self, column_name, col)
self.ref_keys = ColumnSet(self.ref_keys)
def _tablify(self):
tbl = PrettyTable(["Column", "Type", "Foreign Keys", "Reference Keys"])
tbl.align["Column"] = "l"
tbl.align["Type"] = "l"
tbl.align["Foreign Keys"] = "l"
tbl.align["Reference Keys"] = "l"
for col in self._columns:
tbl.add_row([col.name, col.type, col._str_foreign_keys(), col._str_ref_keys()])
return tbl
def __repr__(self):
tbl = str(self._tablify())
r = tbl.split('\n')[0]
brk = "+" + "-"*(len(r)-2) + "+"
title = "|" + self.name.center(len(r)-2) + "|"
return brk + "\n" + title + "\n" + tbl
def __str__(self):
return "Table({0})<{1}>".format(self.name, self.__hash__())
def _repr_html_(self):
return self._tablify().get_html_string()
def select(self, *args):
"""
Returns DataFrame of table with arguments selected as columns. This is
executing:
SELECT
<name of column 1>
, <name of column 2>
, <name of column 3>
FROM
<name_of_the_table>
Parameters
----------
*args: str
columns to select
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
# select name from the Track table
>>> db.tables.Track.select("Name")
Name
0 For Those About To Rock (We Salute You)
1 Balls to the Wall
2 Fast As a Shark
3 Restless and Wild
4 Princess of the Dawn
5 Put The Finger On You
6 Let's Get It Up
7 Inject The Venom
8 Snowballed
9 Evil Walks
...
# select name & composer from the Track table
>>> df = db.tables.Track.select("Name", "Composer")
"""
q = self._query_templates['table']['select'].format(columns=", ".join(args), table=self.name)
return pd.io.sql.read_sql(q, self._con)
def head(self, n=6):
"""
Returns first n values of your table as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
LIMIT <n>
Parameters
----------
n: int
number of rows to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
# select name from the Track table
>>> db.tables.Track.head()
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
2 3 Fast As a Shark 3 2
3 4 Restless and Wild 3 2
4 5 Princess of the Dawn 3 2
5 6 Put The Finger On You 1 1
GenreId Composer Milliseconds \
0 1 Angus Young, Malcolm Young, Brian Johnson 343719
1 1 None 342562
2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho... 230619
3 1 F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. D... 252051
4 1 Deaffy & R.A. Smith-Diesel 375418
5 1 Angus Young, Malcolm Young, Brian Johnson 205662
Bytes UnitPrice
0 11170334 0.99
1 5510424 0.99
2 3990994 0.99
3 4331779 0.99
4 6290521 0.99
5 6713451 0.99
>>> db.tables.Track.head(1)
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
GenreId Composer Milliseconds Bytes \
0 1 Angus Young, Malcolm Young, Brian Johnson 343719 11170334
UnitPrice
0 0.99
"""
q = self._query_templates['table']['head'].format(table=self.name, n=n)
return pd.io.sql.read_sql(q, self._con)
def all(self):
"""
Returns entire table as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> len(db.tables.Track.all())
3503
>>> df = db.tables.Track.all()
"""
q = self._query_templates['table']['all'].format(table=self.name)
return pd.io.sql.read_sql(q, self._con)
def unique(self, *args):
"""
Returns all unique values as a DataFrame. This is executing:
SELECT
DISTINCT
<name_of_the_column_1>
, <name_of_the_column_2>
, <name_of_the_column_3>
...
FROM
<name_of_the_table>
Parameters
----------
*args: columns as strings
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.unique("GenreId")
GenreId
0 1
1 2
2 3
3 4
4 5
5 6
6 7
7 8
8 9
9 10
10 11
11 12
12 13
13 14
14 15
15 16
16 17
17 18
18 19
19 20
20 21
21 22
22 23
23 24
24 25
>>> len(db.tables.Track.unique("GenreId", "MediaTypeId"))
38
"""
if len(args)==0:
columns = "*"
else:
columns = ", ".join(args)
q = self._query_templates['table']['unique'].format(columns=columns, table=self.name)
return pd.io.sql.read_sql(q, self._con)
def sample(self, n=10):
"""
Returns random sample of n rows as a DataFrame. This is executing:
SELECT
*
FROM
<name_of_the_table>
ORDER BY
RANDOM()
LIMIT <n>
Parameters
----------
n: int
number of rows to sample
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Track.sample(10)
TrackId Name AlbumId \
0 274 Samba Makossa 25
1 1971 Girls, Girls, Girls 162
2 843 Otay 68
3 3498 Concerto for Violin, Strings and Continuo in G... 342
4 3004 Pride (In The Name Of Love) 238
5 2938 Beautiful Day 233
6 2023 O Braco Da Minha Guitarra 165
7 1920 Caxanga 158
8 3037 The Wanderer 240
9 1487 Third Stone From The Sun 120
MediaTypeId GenreId Composer \
0 1 7 None
1 1 3 Mick Mars/Nikki Sixx/Tommy Lee
2 1 2 John Scofield, Robert Aries, Milton Chambers a...
3 4 24 Pietro Antonio Locatelli
4 1 1 U2
5 1 1 Adam Clayton, Bono, Larry Mullen, The Edge
6 1 1 None
7 1 7 Milton Nascimento, Fernando Brant
8 1 1 U2; Bono
9 1 1 Jimi Hendrix
Milliseconds Bytes UnitPrice
0 271856 9095410 0.99
1 270288 8874814 0.99
2 423653 14176083 0.99
3 493573 16454937 0.99
4 230243 7549085 0.99
5 248163 8056723 0.99
6 258351 8469531 0.99
7 245551 8144179 0.99
8 283951 9258717 0.99
9 404453 13186975 0.99
"""
q = self._query_templates['table']['sample'].format(table=self.name, n=n)
return pd.io.sql.read_sql(q, self._con)
@property
def count(self):
"""Return total of rows from table."""
return len(self.all())
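# --- illustrative sketch, not part of the original source ---
# `count` above materializes the whole table client-side just to len() it.
# Assuming the connection accepts a plain COUNT(*) (true for every dialect
# supported here), a server-side count avoids transferring the rows:
def _sketch_count(con, table_name):
    # read a one-cell result set instead of the full table
    q = "select count(*) as n from {0}".format(table_name)
    return pd.io.sql.read_sql(q, con)["n"][0]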
class TableSet(object):
"""
Set of Tables. Used for displaying search results in terminal/ipython notebook.
"""
def __init__(self, tables):
for tbl in tables:
setattr(self, tbl.name, tbl)
self.tables = tables
def __getitem__(self, i):
return self.tables[i]
def _tablify(self):
tbl = PrettyTable(["Table", "Columns"])
tbl.align["Table"] = "l"
tbl.align["Columns"] = "l"
for table in self.tables:
column_names = [col.name for col in table._columns]
column_names = ", ".join(column_names)
pretty_column_names = ""
for i in range(0, len(column_names), 80):
pretty_column_names += column_names[i:(i+80)] + "\n"
pretty_column_names = pretty_column_names.strip()
tbl.add_row([table.name, pretty_column_names])
return tbl
def __repr__(self):
tbl = str(self._tablify())
return tbl
def _repr_html_(self):
return self._tablify().get_html_string()
class ColumnSet(object):
"""
Set of Columns. Used for displaying search results in terminal/ipython
notebook.
"""
def __init__(self, columns):
self.columns = columns
def __getitem__(self, i):
return self.columns[i]
def _tablify(self):
tbl = PrettyTable(["Table", "Column Name", "Type"])
tbl.align["Table"] = "l"
tbl.align["Column"] = "l"
tbl.align["Type"] = "l"
for col in self.columns:
tbl.add_row([col.table, col.name, col.type])
return tbl
def __repr__(self):
tbl = str(self._tablify())
return tbl
def _repr_html_(self):
return self._tablify().get_html_string()
class S3(object):
"""
Simple object for storing AWS credentials
"""
def __init__(self, access_key, secret_key, profile=None):
if profile:
self.load_credentials(profile)
else:
self.access_key = access_key
self.secret_key = secret_key
def save_credentials(self, profile):
"""
Saves credentials to a dotfile so you can load them again later.
Parameters
----------
profile: str
name for your profile (i.e. "dev", "prod")
"""
home = os.path.expanduser("~")
filename = os.path.join(home, ".db.py_s3_" + profile)
creds = {
"access_key": self.access_key,
"secret_key": self.secret_key
}
with open(filename, 'wb') as f:
data = json.dumps(creds)
try:
f.write(base64.encodestring(data))
except:
f.write(base64.encodestring(bytes(data, 'utf-8')))
def load_credentials(self, profile):
"""
Loads credentials for a given profile. Profiles are stored in
~/.db.py_s3_{profile_name} and are a base64 encoded JSON file. This is
not to say this is a secure way to store sensitive data, but it will
probably stop your little sister from spinning up EC2 instances.
Parameters
----------
profile: str
identifier/name for your database (i.e. "dev", "prod")
"""
user = os.path.expanduser("~")
f = os.path.join(user, ".db.py_s3_" + profile)
if os.path.exists(f):
creds = json.loads(base64.decodestring(open(f, 'rb').read()).decode('utf-8'))
if 'access_key' not in creds:
raise Exception("`access_key` not found in s3 profile '{0}'".format(profile))
self.access_key = creds['access_key']
if 'secret_key' not in creds:
raise Exception("`secret_key` not found in s3 profile '{0}'".format(profile))
self.secret_key = creds['secret_key']
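# --- illustrative sketch, not part of the original source ---
# The profile file is plain base64-encoded JSON; a round trip looks like
# this (key values are made-up placeholders):
#
#     s3 = S3("AKIA-EXAMPLE", "secret-example")
#     s3.save_credentials("dev")               # writes ~/.db.py_s3_dev
#     s3_again = S3(None, None, profile="dev")
#     assert s3_again.access_key == "AKIA-EXAMPLE"
#
# base64 is obfuscation, not encryption, so treat the dotfile as sensitive.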
class DB(object):
"""
Utility for exploring and querying a database.
Parameters
----------
username: str
Your username for the database
password: str
Your password for the database
hostname: str
Hostname your database is running on (i.e. "localhost", "10.20.1.248")
port: int
Port the database is running on. Defaults to the standard port for the db:
postgres: 5432
redshift: 5439
mysql: 3306
sqlite: n/a
mssql: 1433
filename: str
path to sqlite database
dbname: str
Name of the database
schemas: list
List of schemas to include. Defaults to all.
profile: str
Name of a preconfigured database credentials profile to load
exclude_system_tables: bool
Whether or not to include "system" tables (the ones that the database needs
in order to operate). This includes things like schema definitions. Most of
you probably don't need this, but if you're a db admin you might actually
want to query the system tables.
limit: int, None
Default number of records to return in a query. This is used by the DB.query
method. You can override it by adding limit={X} to the `query` method, or
by passing an argument to `DB()`. None indicates that there will be no
limit (That's right, you'll be limitless. Bradley Cooper style.)
keys_per_column: int, None
Default number of keys to display in the foreign and reference keys.
This is used to control the rendering of PrettyTable a bit. None means
that you'll have verrrrrrrry wide columns in some cases.
driver: str, None
Driver for mssql/pyodbc connections.
Examples
--------
>>> from db import DB
>>> db = DB(username="kermit", password="ilikeflies", hostname="themuppets.com",
port=5432, dbname="muppets", dbtype="postgres")
>>> db = DB(username="fozzybear", password="wakawakawaka", hostname="ec2.523.24.131",
port=5432, dbname="muppets_redshift", dbtype="redshift")
>>> db = DB(username="dev", hostname="localhost",
port=5432, dbname="devdb", dbtype="postgres")
>>> db = DB(username="root", hostname="localhost", dbname="employees", dbtype="mysql")
>>> db = DB(filename="/path/to/mydb.sqlite", dbtype="sqlite")
>>> db = DB(dbname="AdventureWorks2012", dbtype="mssql", driver="{FreeTDS}")
"""
def __init__(self, username=None, password=None, hostname="localhost",
port=None, filename=None, dbname=None, dbtype=None, schemas=None,
profile="default", exclude_system_tables=True, limit=1000,
keys_per_column=None, driver=None):
if port is None:
if dbtype=="postgres":
port = 5432
elif dbtype=="redshift":
port = 5439
elif dbtype=="mysql":
port = 3306
elif dbtype=="sqlite":
port = None
elif dbtype=="mssql":
port = 1433
elif profile is not None:
pass
else:
raise Exception("Database type not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift")
if not dbtype in ("sqlite", "mssql") and username is None:
self.load_credentials(profile)
elif dbtype=="sqlite" and filename is None:
self.load_credentials(profile)
else:
self.username = username
self.password = password
self.hostname = hostname
self.port = port
self.filename = filename
self.dbname = dbname
self.dbtype = dbtype
self.schemas = schemas
self.limit = limit
self.keys_per_column = keys_per_column
self.driver = driver
if self.dbtype is None:
raise Exception("Database type not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift")
self._query_templates = queries_templates.get(self.dbtype).queries
if self.dbtype=="postgres" or self.dbtype=="redshift":
if not HAS_PG:
raise Exception("Couldn't find psycopg2 library. Please ensure it is installed")
self.con = pg.connect(user=self.username, password=self.password,
host=self.hostname, port=self.port, dbname=self.dbname)
self.cur = self.con.cursor()
elif self.dbtype=="sqlite":
if not HAS_SQLITE:
raise Exception("Couldn't find sqlite library. Please ensure it is installed")
self.con = sqlite.connect(self.filename)
self.cur = self.con.cursor()
self._create_sqlite_metatable()
elif self.dbtype=="mysql":
if not HAS_MYSQL:
raise Exception("Couldn't find MySQLdb or pymysql library. Please ensure it is installed")
creds = {}
for arg in ["username", "password", "hostname", "port", "dbname"]:
if getattr(self, arg):
value = getattr(self, arg)
if arg=="username":
arg = "user"
elif arg=="password":
arg = "passwd"
elif arg=="dbname":
arg = "db"
elif arg=="hostname":
arg = "host"
creds[arg] = value
self.con = mysql_connect(**creds)
self.con.autocommit(True)
self.cur = self.con.cursor()
elif self.dbtype=="mssql":
if not HAS_ODBC and not HAS_PYMSSQL:
raise Exception("Couldn't find pyodbc or pymssql libraries. Please ensure one of them is installed")
if HAS_ODBC:
base_con = "Driver={driver};Server={server};Database={database};".format(
driver=self.driver or "SQL Server",
server=self.hostname or "localhost",
database=self.dbname or ''
)
conn_str = ((self.username and self.password) and "{}{}".format(
base_con,
"User Id={username};Password={password};".format(
username=self.username,
password=self.password
)
) or "{}{}".format(base_con, "Trusted_Connection=Yes;"))
try:
self.con = pyo.connect(conn_str)
self.cur = self.con.cursor()
except:
self.con = pyo.connect(
driver=self.driver or "SQL Server",
server=self.hostname or "localhost",
port=self.port,
database=self.dbname or '',
uid=self.username,
pwd=self.password)
self.cur = self.con.cursor()
elif HAS_PYMSSQL:
if self.port:
hostname = '{0}:{1}'.format(self.hostname, self.port)
else:
hostname = self.hostname
self.con = pymssql.connect(host=hostname,
user=self.username,
password=self.password,
database=self.dbname)
self.cur = self.con.cursor()
self.tables = TableSet([])
self.refresh_schema(exclude_system_tables)
self.handlebars = pybars.Compiler()
def __str__(self):
return "DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}".format(
dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname)
def __repr__(self):
return self.__str__()
def __del__(self):
del self.cur
del self.con
def load_credentials(self, profile="default"):
"""
Loads credentials for a given profile. Profiles are stored in
~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not
to say this is a secure way to store sensitive data, but it will probably
stop your little sister from stealing your passwords.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod")
"""
user = os.path.expanduser("~")
f = os.path.join(user, ".db.py_" + profile)
if os.path.exists(f):
raw_creds = open(f, 'rb').read()
raw_creds = base64.decodestring(raw_creds).decode('utf-8')
creds = json.loads(raw_creds)
self.username = creds.get('username')
self.password = creds.get('password')
self.hostname = creds.get('hostname')
self.port = creds.get('port')
self.filename = creds.get('filename')
self.dbname = creds.get('dbname')
self.dbtype = creds.get('dbtype')
self.schemas = creds.get('schemas')
self.limit = creds.get('limit')
self.keys_per_column = creds.get('keys_per_column')
else:
raise Exception("Credentials not configured!")
def save_credentials(self, profile="default"):
"""
Save your database credentials so you don't have to save them in script.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod")
>>> db = DB(username="hank", password="foo",
>>> hostname="prod.mardukas.com", dbname="bar")
>>> db.save_credentials(profile="production")
>>> db = DB(username="hank", password="foo",
>>> hostname="staging.mardukas.com", dbname="bar")
>>> db.save_credentials(profile="staging")
>>> db = DB(profile="staging")
"""
if self.filename:
db_filename = os.path.join(os.getcwd(), self.filename)
else:
db_filename = None
user = os.path.expanduser("~")
dotfile = os.path.join(user, ".db.py_" + profile)
creds = {
"username": self.username,
"password": self.password,
"hostname": self.hostname,
"port": self.port,
"filename": db_filename,
"dbname": self.dbname,
"dbtype": self.dbtype,
"schemas": self.schemas,
"limit": self.limit,
"keys_per_column": self.keys_per_column,
}
with open(dotfile, 'wb') as f:
data = json.dumps(creds)
try:
f.write(base64.encodestring(data))
except:
f.write(base64.encodestring(bytes(data, 'utf-8')))
def find_table(self, search):
"""
Aggressively search through your database's schema for a table.
Parameters
-----------
search: str
glob pattern for what you're looking for
Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.find_table("A*")
+--------+--------------------------+
| Table | Columns |
+--------+--------------------------+
| Album | AlbumId, Title, ArtistId |
| Artist | ArtistId, Name |
+--------+--------------------------+
>>> results = db.find_table("tmp*") # returns all tables prefixed w/ tmp
>>> results = db.find_table("prod_*") # returns all tables prefixed w/ prod_
>>> results = db.find_table("*Invoice*") # returns all tables containing trans
>>> results = db.find_table("*") # returns everything
"""
tables = []
for table in self.tables:
if glob.fnmatch.fnmatch(table.name, search):
tables.append(table)
return TableSet(tables)
def find_column(self, search, data_type=None):
"""
Aggressively search through your database's schema for a column.
Parameters
-----------
search: str
glob pattern for what you're looking for
data_type: str, list
(optional) specify which data type(s) you want to return
Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.find_column("Name") # returns all columns named "Name"
+-----------+-------------+---------------+
| Table | Column Name | Type |
+-----------+-------------+---------------+
| Artist | Name | NVARCHAR(120) |
| Genre | Name | NVARCHAR(120) |
| MediaType | Name | NVARCHAR(120) |
| Playlist | Name | NVARCHAR(120) |
| Track | Name | NVARCHAR(200) |
+-----------+-------------+---------------+
>>> db.find_column("*Id") # returns all columns ending w/ Id
+---------------+---------------+---------+
| Table | Column Name | Type |
+---------------+---------------+---------+
| Album | AlbumId | INTEGER |
| Album | ArtistId | INTEGER |
| Artist | ArtistId | INTEGER |
| Customer | SupportRepId | INTEGER |
| Customer | CustomerId | INTEGER |
| Employee | EmployeeId | INTEGER |
| Genre | GenreId | INTEGER |
| Invoice | InvoiceId | INTEGER |
| Invoice | CustomerId | INTEGER |
| InvoiceLine | InvoiceId | INTEGER |
| InvoiceLine | TrackId | INTEGER |
| InvoiceLine | InvoiceLineId | INTEGER |
| MediaType | MediaTypeId | INTEGER |
| Playlist | PlaylistId | INTEGER |
| PlaylistTrack | TrackId | INTEGER |
| PlaylistTrack | PlaylistId | INTEGER |
| Track | MediaTypeId | INTEGER |
| Track | TrackId | INTEGER |
| Track | AlbumId | INTEGER |
| Track | GenreId | INTEGER |
+---------------+---------------+---------+
>>> db.find_column("*Address*") # returns all columns containing Address
+----------+----------------+--------------+
| Table | Column Name | Type |
+----------+----------------+--------------+
| Customer | Address | NVARCHAR(70) |
| Employee | Address | NVARCHAR(70) |
| Invoice | BillingAddress | NVARCHAR(70) |
+----------+----------------+--------------+
>>> db.find_column("*Address*", data_type="NVARCHAR(70)") # returns all columns containing Address that are varchars
>>> db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]) # returns all columns have an "e" and are NVARCHAR(70)S or INTEGERS
"""
if isinstance(data_type, str):
data_type = [data_type]
cols = []
for table in self.tables:
for col in vars(table):
if glob.fnmatch.fnmatch(col, search):
if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type:
continue
if isinstance(getattr(table, col), Column):
cols.append(getattr(table, col))
return ColumnSet(cols)
def _assign_limit(self, q, limit=1000):
# postgres, mysql, & sqlite
if self.dbtype in ["postgres", "redshift", "sqlite", "mysql"]:
if limit:
q = q.rstrip().rstrip(";")
q = "select * from ({q}) q limit {limit}".format(q=q, limit=limit)
return q
# mssql
else:
if limit:
q = "select top {limit} * from ({q}) q".format(limit=limit, q=q)
return q
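# --- illustrative sketch, not part of the original source ---
# A standalone model of the wrapping above, so the two dialect branches
# are easy to compare side by side:
def _sketch_assign_limit(q, limit, dbtype="postgres"):
    if dbtype in ("postgres", "redshift", "sqlite", "mysql"):
        # the trailing semicolon must go because the query becomes a subquery
        return "select * from ({0}) q limit {1}".format(q.rstrip().rstrip(";"), limit)
    # mssql has no LIMIT clause; it uses TOP and keeps the query untouched
    return "select top {0} * from ({1}) q".format(limit, q)
# _sketch_assign_limit("select * from Track;", 10)
# -> 'select * from (select * from Track) q limit 10'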
def _apply_handlebars(self, q, data, union=True):
if (sys.version_info < (3, 0)):
q = unicode(q)
template = self.handlebars.compile(q)
if isinstance(data, list):
query = [template(item) for item in data]
query = [str(item) for item in query]
if union==True:
query = "\nUNION ALL".join(query)
else:
query = "\n".join(query)
elif isinstance(data, dict):
query = template(data)
query = str(query)
else:
return q
return query
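# --- illustrative sketch, not part of the original source ---
# How a list of dicts fans out through the compiled template above:
#
#     template = "\nselect count(*) from {{ name }}"
#     data = [{"name": "Album"}, {"name": "Artist"}]
#     # each dict is rendered separately, then joined with "\nUNION ALL"
#
# The joiner has no trailing whitespace, so a template must *start* with
# a newline (as the docstring examples do) or "ALL" would glue directly
# onto the next SELECT.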
def query(self, q, data=None, union=True, limit=None):
"""
Query your database with a raw string.
Parameters
----------
q: str
Query string to execute
data: list, dict
Optional argument for handlebars-queries. Data will be passed to the
template and rendered using handlebars.
union: bool
Whether or not to "UNION ALL" the rendered handlebars templates. This
will return the handlebars queries as a single data frame.
limit: int
Number of records to return
Examples
--------
>>> from db import DemoDB
>>> db.query("select * from Track")
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
2 3 Fast As a Shark 3 2
GenreId Composer Milliseconds \
0 1 Angus Young, Malcolm Young, Brian Johnson 343719
1 1 None 342562
2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho... 230619
Bytes UnitPrice
0 11170334 0.99
1 5510424 0.99
2 3990994 0.99
...
>>> db.query("select * from Track", limit=10)
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
2 3 Fast As a Shark 3 2
3 4 Restless and Wild 3 2
4 5 Princess of the Dawn 3 2
5 6 Put The Finger On You 1 1
6 7 Let's Get It Up 1 1
7 8 Inject The Venom 1 1
8 9 Snowballed 1 1
9 10 Evil Walks 1 1
GenreId Composer Milliseconds \
0 1 Angus Young, Malcolm Young, Brian Johnson 343719
1 1 None 342562
2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. Ho... 230619
3 1 F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. D... 252051
4 1 Deaffy & R.A. Smith-Diesel 375418
5 1 Angus Young, Malcolm Young, Brian Johnson 205662
6 1 Angus Young, Malcolm Young, Brian Johnson 233926
7 1 Angus Young, Malcolm Young, Brian Johnson 210834
8 1 Angus Young, Malcolm Young, Brian Johnson 203102
9 1 Angus Young, Malcolm Young, Brian Johnson 263497
Bytes UnitPrice
0 11170334 0.99
1 5510424 0.99
2 3990994 0.99
3 4331779 0.99
4 6290521 0.99
5 6713451 0.99
6 7636561 0.99
7 6852860 0.99
8 6599424 0.99
9 8611245 0.99
>>> q = '''
SELECT
a.Title
, t.Name
, t.UnitPrice
FROM
Album a
INNER JOIN
Track t
on a.AlbumId = t.AlbumId;
'''
>>> db.query(q, limit=10)
Title \
0 For Those About To Rock We Salute You
1 Balls to the Wall
2 Restless and Wild
3 Restless and Wild
4 Restless and Wild
5 For Those About To Rock We Salute You
6 For Those About To Rock We Salute You
7 For Those About To Rock We Salute You
8 For Those About To Rock We Salute You
9 For Those About To Rock We Salute You
Name UnitPrice
0 For Those About To Rock (We Salute You) 0.99
1 Balls to the Wall 0.99
2 Fast As a Shark 0.99
3 Restless and Wild 0.99
4 Princess of the Dawn 0.99
5 Put The Finger On You 0.99
6 Let's Get It Up 0.99
7 Inject The Venom 0.99
8 Snowballed 0.99
9 Evil Walks 0.99
>>> template = '''
SELECT
'{{ name }}' as table_name
, COUNT(*) as cnt
FROM
{{ name }}
GROUP BY
table_name
'''
>>> data = [
{"name": "Album"},
{"name": "Artist"},
{"name": "Track"}
]
>>> db.query(template, data=data)
table_name cnt
0 Album 347
1 Artist 275
2 Track 3503
>>> q = '''
SELECT
{{#cols}}
{{#if @last}}
{{ . }}
{{else}}
{{ . }} ,
{{/if}}
{{/cols}}
FROM
Album;
'''
>>> data = {"cols": ["AlbumId", "Title", "ArtistId"]}
>>> db.query(q, data=data, union=False)
AlbumId Title ArtistId
0 1 For Those About To Rock We Salute You 1
1 2 Balls to the Wall 2
2 3 Restless and Wild 2
3 4 Let There Be Rock 1
4 5 Big Ones 3
"""
if data:
q = self._apply_handlebars(q, data, union)
if limit is not False:
q = self._assign_limit(q, limit)
return pd.io.sql.read_sql(q, self.con)
def query_from_file(self, filename, data=None, union=True, limit=None):
"""
Query your database from a file.
Parameters
----------
filename: str
A SQL script
data: list, dict
Optional argument for handlebars-queries. Data will be passed to the
template and rendered using handlebars.
union: bool
Whether or not to "UNION ALL" the rendered handlebars templates. This
will return the handlebars queries as a single data frame.
limit: int
Number of records to return
Examples
--------
>>> from db import DemoDB
>>> q = '''
SELECT
a.Title
, t.Name
, t.UnitPrice
FROM
Album a
INNER JOIN
Track t
on a.AlbumId = t.AlbumId;
'''
>>> with open("myscript.sql", "w") as f:
... f.write(q)
...
>>> db.query_from_file("myscript.sql", limit=10)
Title \
0 For Those About To Rock We Salute You
1 Balls to the Wall
2 Restless and Wild
3 Restless and Wild
4 Restless and Wild
5 For Those About To Rock We Salute You
6 For Those About To Rock We Salute You
7 For Those About To Rock We Salute You
8 For Those About To Rock We Salute You
9 For Those About To Rock We Salute You
Name UnitPrice
0 For Those About To Rock (We Salute You) 0.99
1 Balls to the Wall 0.99
2 Fast As a Shark 0.99
3 Restless and Wild 0.99
4 Princess of the Dawn 0.99
5 Put The Finger On You 0.99
6 Let's Get It Up 0.99
7 Inject The Venom 0.99
8 Snowballed 0.99
9 Evil Walks 0.99
"""
with open(filename) as fp:
q = fp.read()
return self.query(q, data=data, union=union, limit=limit)
def _create_sqlite_metatable(self):
"""
SQLite doesn't come with any metatables (at least ones that fit into our
framework), so we're going to create them.
"""
sys.stderr.write("Indexing schema. This will take a second...")
rows_to_insert = []
tables = [row[0] for row in self.cur.execute("select name from sqlite_master where type='table';")]
for table in tables:
for row in self.cur.execute("pragma table_info({0})".format(table)):
rows_to_insert.append((table, row[1], row[2]))
# collect table and column names
self.cur.execute("drop table if exists tmp_dbpy_schema;")
self.cur.execute("create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);")
for row in rows_to_insert:
self.cur.execute("insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');".format(*row))
self.cur.execute("SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';")
# find for foreign keys
self.cur.execute("drop table if exists tmp_dbpy_foreign_keys;")
self.cur.execute("create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);")
foreign_keys = []
self.cur.execute("SELECT name, sql FROM sqlite_master ;")
for (table_name, sql) in self.cur:
rgx = "FOREIGN KEY \(\[(.*)\]\) REFERENCES \[(.*)\] \(\[(.*)\]\)"
if sql is None:
continue
for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql):
foreign_keys.append((table_name, column_name, foreign_table, foreign_key))
for row in foreign_keys:
sql_insert = "insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');"
self.cur.execute(sql_insert.format(*row))
self.con.commit()
sys.stderr.write("finished!\n")
def refresh_schema(self, exclude_system_tables=True):
"""
Pulls your database's schema again and looks for any new tables and
columns.
"""
sys.stderr.write("Refreshing schema. Please wait...")
if self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified' in self._query_templates['system']:
schemas_str = ','.join([repr(schema) for schema in self.schemas])
q = self._query_templates['system']['schema_specified'] % schemas_str
elif exclude_system_tables==True:
q = self._query_templates['system']['schema_no_system']
else:
q = self._query_templates['system']['schema_with_system']
self.cur.execute(q)
tables = {}
for (table_name, column_name, data_type) in self.cur:
if table_name not in tables:
tables[table_name] = []
tables[table_name].append(Column(self.con, self._query_templates, table_name, column_name, data_type, self.keys_per_column))
self.tables = TableSet([Table(self.con, self._query_templates, t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())])
sys.stderr.write("done!\n")
def _try_command(self, cmd):
try:
self.cur.execute(cmd)
except Exception as e:
print ("Error executing command:")
print ("\t '{0}'".format(cmd))
print ("Exception: {0}".format(e))
self.con.rollback()
def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000,
AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None,
print_sql=False, bucket_location=None, s3_bucket=None):
"""
Upload a dataframe to redshift via s3.
Parameters
----------
name: str
name for your shiny new table
df: DataFrame
data frame you want to save to the db
drop_if_exists: bool (False)
whether you'd like to drop the table if it already exists
chunk_size: int (10000)
Number of rows per DataFrame chunk to upload and COPY from S3. Upload
speed is *much* faster when the number of chunks is a multiple of the
cluster's total slice count. Ex: DW1.XL nodes have 2 slices per node, so
if running 2 nodes you will want the chunk count to be a multiple of 4.
AWS_ACCESS_KEY: str
your aws access key. if this is None, the function will try
and grab AWS_ACCESS_KEY from your environment variables
AWS_SECRET_KEY: str
your aws secret key. if this is None, the function will try
and grab AWS_SECRET_KEY from your environment variables
s3: S3
alternative to using keys, you can use an S3 object
print_sql: bool (False)
option for printing sql statement that will be executed
bucket_location: boto.s3.connection.Location
a specific AWS location in which to create the temporary transfer s3
bucket. This should match your redshift cluster's region.
Examples
--------
"""
if self.dbtype!="redshift":
raise Exception("Sorry, feature only available for redshift.")
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.connection import Location
# if boto is present, set the bucket_location to default.
# we can't do this in the function definition because we're
# lazily importing boto only if necessary here.
if bucket_location is None:
bucket_location = Location.DEFAULT
except ImportError:
raise Exception("Couldn't find boto library. Please ensure it is installed")
if s3 is not None:
AWS_ACCESS_KEY = s3.access_key
AWS_SECRET_KEY = s3.secret_key
if AWS_ACCESS_KEY is None:
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')
if AWS_SECRET_KEY is None:
AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')
if AWS_ACCESS_KEY is None:
raise Exception("Must specify AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`")
if AWS_SECRET_KEY is None:
raise Exception("Must specify AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`")
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
# this way users with permission on specific buckets can use this feature
bucket_name = "dbpy-{0}".format(uuid.uuid4())
if s3_bucket:
bucket = conn.get_bucket(s3_bucket)
bucket_name = s3_bucket
else:
bucket = conn.create_bucket(bucket_name, location=bucket_location)
# we're going to chunk the file into pieces. according to amazon, this is
# much faster when it comes time to run the \COPY statement.
#
# see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html
sys.stderr.write("Transfering {0} to s3 in chunks".format(name))
len_df = len(df)
chunks = range(0, len_df, chunk_size)
def upload_chunk(i):
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
chunk = df[i:(i+chunk_size)]
k = Key(bucket)
k.key = 'data-%d-%d.csv.gz' % (i, i + chunk_size)
k.set_metadata('parent', 'db.py')
out = StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(chunk.to_csv(index=False, encoding='utf-8'))
k.set_contents_from_string(out.getvalue())
sys.stderr.write(".")
return i
threads = []
for i in chunks:
t = threading.Thread(target=upload_chunk, args=(i, ))
t.start()
threads.append(t)
# join all threads
for t in threads:
t.join()
sys.stderr.write("done\n")
if drop_if_exists:
sql = "DROP TABLE IF EXISTS {0};".format(name)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
# generate schema from pandas and then adapt for redshift
sql = pd.io.sql.get_schema(df, name)
# defaults to using SQLite format. need to convert it to Postgres
sql = sql.replace("[", "").replace("]", "")
# we'll create the table ONLY if it doesn't exist
sql = sql.replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS")
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
# perform the \COPY here. the s3 argument is a prefix, so it'll pick up
# all of the data*.gz files we've created
sys.stderr.write("Copying data from s3 to redshfit...")
sql = """
copy {name} from 's3://{bucket_name}/data'
credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}'
CSV IGNOREHEADER as 1 GZIP;
""".format(name=name, bucket_name=bucket_name,
AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
sys.stderr.write("done!\n")
# tear down the bucket
sys.stderr.write("Tearing down bucket...")
for key in bucket.list():
key.delete()
if not s3_bucket:
conn.delete_bucket(bucket_name)
sys.stderr.write("done!")
def list_profiles():
"""
Lists all of the database profiles available
Examples
--------
>>> from db import list_profiles
>>> list_profiles()
{'demo': {u'dbname': None,
u'dbtype': u'sqlite',
u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite',
u'hostname': u'localhost',
u'password': None,
u'port': 5432,
u'username': None},
'muppets': {u'dbname': u'muppetdb',
u'dbtype': u'postgres',
u'filename': None,
u'hostname': u'muppets.yhathq.com',
u'password': None,
u'port': 5432,
u'username': u'kermit'}}
"""
profiles = {}
user = os.path.expanduser("~")
for f in os.listdir(user):
if f.startswith(".db.py_"):
profile = os.path.join(user, f)
profile = json.loads(base64.decodestring(open(profile, 'rb').read()).decode('utf-8'))
profiles[f[7:]] = profile
return profiles
def remove_profile(name, s3=False):
"""
Removes a profile from your config
"""
user = os.path.expanduser("~")
if s3==True:
f = os.path.join(user, ".db.py_s3_" + name)
else:
f = os.path.join(user, ".db.py_" + name)
try:
try:
open(f)
except:
raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
os.remove(f)
except Exception as e:
raise Exception("Could not remove profile {0}! Excpetion: {1}".format(name, e))
def DemoDB(keys_per_column=None):
"""
Provides an instance of DB that hooks up to the Chinook DB
See http://chinookdatabase.codeplex.com/ for more info.
"""
_ROOT = os.path.abspath(os.path.dirname(__file__))
chinook = os.path.join(_ROOT, 'data', "chinook.sqlite")
return DB(filename=chinook, dbtype="sqlite", keys_per_column=keys_per_column)
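# --- illustrative sketch, not part of the original source ---
# Typical interactive use of the bundled demo database:
#
#     from db import DemoDB
#     db = DemoDB()
#     db.tables.Artist.head()             # peek at a table
#     db.find_column("*Id")               # browse the schema
#     db.query("select count(*) from Track")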
| LeMeteore/db.py | db/db.py | Python | bsd-2-clause | 60,629 | ["Brian"] | 64d7c97c87e47f3983df5b75effad84a3d3e77730b080c261ac3cc920861c158 |
import sys
import textwrap
import pytest
from _pytest import fixtures
from _pytest.config import ExitCode
from _pytest.fixtures import FixtureRequest
from _pytest.pathlib import Path
from _pytest.pytester import get_public_names
def test_getfuncargnames_functions():
"""Test getfuncargnames for normal functions"""
def f():
raise NotImplementedError()
assert not fixtures.getfuncargnames(f)
def g(arg):
raise NotImplementedError()
assert fixtures.getfuncargnames(g) == ("arg",)
def h(arg1, arg2="hello"):
raise NotImplementedError()
assert fixtures.getfuncargnames(h) == ("arg1",)
def j(arg1, arg2, arg3="hello"):
raise NotImplementedError()
assert fixtures.getfuncargnames(j) == ("arg1", "arg2")
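# --- illustrative sketch, not part of the original test file ---
# The rule these assertions exercise: parameters with defaults are
# dropped. A rough stdlib-only equivalent for plain functions (pytest's
# real implementation also handles methods, staticmethods and partials):
def _sketch_argnames(func):
    import inspect
    params = inspect.signature(func).parameters.values()
    return tuple(p.name for p in params if p.default is inspect.Parameter.empty)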
def test_getfuncargnames_methods():
"""Test getfuncargnames for normal methods"""
class A:
def f(self, arg1, arg2="hello"):
raise NotImplementedError()
assert fixtures.getfuncargnames(A().f) == ("arg1",)
def test_getfuncargnames_staticmethod():
"""Test getfuncargnames for staticmethods"""
class A:
@staticmethod
def static(arg1, arg2, x=1):
raise NotImplementedError()
assert fixtures.getfuncargnames(A.static, cls=A) == ("arg1", "arg2")
def test_getfuncargnames_partial():
"""Check getfuncargnames for methods defined with functools.partial (#5701)"""
import functools
def check(arg1, arg2, i):
raise NotImplementedError()
class T:
test_ok = functools.partial(check, i=2)
values = fixtures.getfuncargnames(T().test_ok, name="test_ok")
assert values == ("arg1", "arg2")
def test_getfuncargnames_staticmethod_partial():
"""Check getfuncargnames for staticmethods defined with functools.partial (#5701)"""
import functools
def check(arg1, arg2, i):
raise NotImplementedError()
class T:
test_ok = staticmethod(functools.partial(check, i=2))
values = fixtures.getfuncargnames(T().test_ok, name="test_ok")
assert values == ("arg1", "arg2")
@pytest.mark.pytester_example_path("fixtures/fill_fixtures")
class TestFillFixtures:
def test_fillfuncargs_exposed(self):
# used by oejskit, kept for compatibility
assert pytest._fillfuncargs == fixtures.fillfixtures
def test_funcarg_lookupfails(self, testdir):
testdir.copy_example()
result = testdir.runpytest() # "--collect-only")
assert result.ret != 0
result.stdout.fnmatch_lines(
"""
*def test_func(some)*
*fixture*some*not found*
*xyzsomething*
"""
)
def test_detect_recursive_dependency_error(self, testdir):
testdir.copy_example()
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*recursive dependency involving fixture 'fix1' detected*"]
)
def test_funcarg_basic(self, testdir):
testdir.copy_example()
item = testdir.getitem(Path("test_funcarg_basic.py"))
fixtures.fillfixtures(item)
del item.funcargs["request"]
assert len(get_public_names(item.funcargs)) == 2
assert item.funcargs["some"] == "test_func"
assert item.funcargs["other"] == 42
def test_funcarg_lookup_modulelevel(self, testdir):
testdir.copy_example()
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_funcarg_lookup_classlevel(self, testdir):
p = testdir.copy_example()
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_conftest_funcargs_only_available_in_subdir(self, testdir):
testdir.copy_example()
result = testdir.runpytest("-v")
result.assert_outcomes(passed=2)
def test_extend_fixture_module_class(self, testdir):
testfile = testdir.copy_example()
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest(testfile)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_extend_fixture_conftest_module(self, testdir):
p = testdir.copy_example()
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest(next(p.visit("test_*.py")))
result.stdout.fnmatch_lines(["*1 passed*"])
def test_extend_fixture_conftest_conftest(self, testdir):
p = testdir.copy_example()
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest(next(p.visit("test_*.py")))
result.stdout.fnmatch_lines(["*1 passed*"])
def test_extend_fixture_conftest_plugin(self, testdir):
testdir.makepyfile(
testplugin="""
import pytest
@pytest.fixture
def foo():
return 7
"""
)
testdir.syspathinsert()
testdir.makeconftest(
"""
import pytest
pytest_plugins = 'testplugin'
@pytest.fixture
def foo(foo):
return foo + 7
"""
)
testdir.makepyfile(
"""
def test_foo(foo):
assert foo == 14
"""
)
result = testdir.runpytest("-s")
assert result.ret == 0
def test_extend_fixture_plugin_plugin(self, testdir):
# Two plugins should extend each other in loading order
testdir.makepyfile(
testplugin0="""
import pytest
@pytest.fixture
def foo():
return 7
"""
)
testdir.makepyfile(
testplugin1="""
import pytest
@pytest.fixture
def foo(foo):
return foo + 7
"""
)
testdir.syspathinsert()
testdir.makepyfile(
"""
pytest_plugins = ['testplugin0', 'testplugin1']
def test_foo(foo):
assert foo == 14
"""
)
result = testdir.runpytest()
assert result.ret == 0
def test_override_parametrized_fixture_conftest_module(self, testdir):
"""Test override of the parametrized fixture with non-parametrized one on the test module level."""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(params=[1, 2, 3])
def spam(request):
return request.param
"""
)
testfile = testdir.makepyfile(
"""
import pytest
@pytest.fixture
def spam():
return 'spam'
def test_spam(spam):
assert spam == 'spam'
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest(testfile)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_override_parametrized_fixture_conftest_conftest(self, testdir):
"""Test override of the parametrized fixture with non-parametrized one on the conftest level."""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(params=[1, 2, 3])
def spam(request):
return request.param
"""
)
subdir = testdir.mkpydir("subdir")
subdir.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def spam():
return 'spam'
"""
)
)
testfile = subdir.join("test_spam.py")
testfile.write(
textwrap.dedent(
"""\
def test_spam(spam):
assert spam == "spam"
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest(testfile)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_override_non_parametrized_fixture_conftest_module(self, testdir):
"""Test override of the non-parametrized fixture with parametrized one on the test module level."""
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def spam():
return 'spam'
"""
)
testfile = testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1, 2, 3])
def spam(request):
return request.param
params = {'spam': 1}
def test_spam(spam):
assert spam == params['spam']
params['spam'] += 1
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*3 passed*"])
result = testdir.runpytest(testfile)
result.stdout.fnmatch_lines(["*3 passed*"])
def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):
"""Test override of the non-parametrized fixture with parametrized one on the conftest level."""
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def spam():
return 'spam'
"""
)
subdir = testdir.mkpydir("subdir")
subdir.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture(params=[1, 2, 3])
def spam(request):
return request.param
"""
)
)
testfile = subdir.join("test_spam.py")
testfile.write(
textwrap.dedent(
"""\
params = {'spam': 1}
def test_spam(spam):
assert spam == params['spam']
params['spam'] += 1
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*3 passed*"])
result = testdir.runpytest(testfile)
result.stdout.fnmatch_lines(["*3 passed*"])
def test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest(
self, testdir
):
"""Test override of the autouse fixture with parametrized one on the conftest level.
This test covers the issue explained in issue 1601
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True)
def spam():
return 'spam'
"""
)
subdir = testdir.mkpydir("subdir")
subdir.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture(params=[1, 2, 3])
def spam(request):
return request.param
"""
)
)
testfile = subdir.join("test_spam.py")
testfile.write(
textwrap.dedent(
"""\
params = {'spam': 1}
def test_spam(spam):
assert spam == params['spam']
params['spam'] += 1
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*3 passed*"])
result = testdir.runpytest(testfile)
result.stdout.fnmatch_lines(["*3 passed*"])
def test_autouse_fixture_plugin(self, testdir):
# A fixture from a plugin has no baseid set, which screwed up
# the autouse fixture handling.
testdir.makepyfile(
testplugin="""
import pytest
@pytest.fixture(autouse=True)
def foo(request):
request.function.foo = 7
"""
)
testdir.syspathinsert()
testdir.makepyfile(
"""
pytest_plugins = 'testplugin'
def test_foo(request):
assert request.function.foo == 7
"""
)
result = testdir.runpytest()
assert result.ret == 0
def test_funcarg_lookup_error(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def a_fixture(): pass
@pytest.fixture
def b_fixture(): pass
@pytest.fixture
def c_fixture(): pass
@pytest.fixture
def d_fixture(): pass
"""
)
testdir.makepyfile(
"""
def test_lookup_error(unknown):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at setup of test_lookup_error*",
" def test_lookup_error(unknown):*",
"E fixture 'unknown' not found",
"> available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*",
# sorted
"> use 'py*test --fixtures *' for help on them.",
"*1 error*",
]
)
result.stdout.no_fnmatch_line("*INTERNAL*")
def test_fixture_excinfo_leak(self, testdir):
# on python2 sys.excinfo would leak into fixture executions
testdir.makepyfile(
"""
import sys
import traceback
import pytest
@pytest.fixture
def leak():
if sys.exc_info()[0]: # python3 bug :)
traceback.print_exc()
#fails
assert sys.exc_info() == (None, None, None)
def test_leak(leak):
if sys.exc_info()[0]: # python3 bug :)
traceback.print_exc()
assert sys.exc_info() == (None, None, None)
"""
)
result = testdir.runpytest()
assert result.ret == 0
class TestRequestBasic:
def test_request_attributes(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.fixture
def something(request): pass
def test_func(something): pass
"""
)
req = fixtures.FixtureRequest(item)
assert req.function == item.obj
assert req.keywords == item.keywords
assert hasattr(req.module, "test_func")
assert req.cls is None
assert req.function.__name__ == "test_func"
assert req.config == item.config
assert repr(req).find(req.function.__name__) != -1
def test_request_attributes_method(self, testdir):
(item,) = testdir.getitems(
"""
import pytest
class TestB(object):
@pytest.fixture
def something(self, request):
return 1
def test_func(self, something):
pass
"""
)
req = item._request
assert req.cls.__name__ == "TestB"
assert req.instance.__class__ == req.cls
def test_request_contains_funcarg_arg2fixturedefs(self, testdir):
modcol = testdir.getmodulecol(
"""
import pytest
@pytest.fixture
def something(request):
pass
class TestClass(object):
def test_method(self, something):
pass
"""
)
(item1,) = testdir.genitems([modcol])
assert item1.name == "test_method"
arg2fixturedefs = fixtures.FixtureRequest(item1)._arg2fixturedefs
assert len(arg2fixturedefs) == 1
assert arg2fixturedefs["something"][0].argname == "something"
@pytest.mark.skipif(
hasattr(sys, "pypy_version_info"),
reason="this method of test doesn't work on pypy",
)
def test_request_garbage(self, testdir):
try:
import xdist # noqa
except ImportError:
pass
else:
pytest.xfail("this test is flaky when executed with xdist")
testdir.makepyfile(
"""
import sys
import pytest
from _pytest.fixtures import PseudoFixtureDef
import gc
@pytest.fixture(autouse=True)
def something(request):
original = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
yield
try:
gc.collect()
leaked = [x for x in gc.garbage if isinstance(x, PseudoFixtureDef)]
assert leaked == []
finally:
gc.set_debug(original)
def test_func():
pass
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["* 1 passed in *"])
def test_getfixturevalue_recursive(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def something(request):
return 1
"""
)
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def something(request):
return request.getfixturevalue("something") + 1
def test_func(something):
assert something == 2
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_getfixturevalue_teardown(self, testdir):
"""
Issue #1895
`test_inner` requests `inner` fixture, which in turn requests `resource`
using `getfixturevalue`. `test_func` then requests `resource`.
`resource` is torn down before `inner` because the fixture mechanism won't consider
`inner` dependent on `resource` when it is used via `getfixturevalue`: `test_func`
will then cause the `resource`'s finalizer to be called first because of this.
"""
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope='session')
def resource():
r = ['value']
yield r
r.pop()
@pytest.fixture(scope='session')
def inner(request):
resource = request.getfixturevalue('resource')
assert resource == ['value']
yield
assert resource == ['value']
def test_inner(inner):
pass
def test_func(resource):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 2 passed in *"])
def test_getfixturevalue(self, testdir):
item = testdir.getitem(
"""
import pytest
values = [2]
@pytest.fixture
def something(request): return 1
@pytest.fixture
def other(request):
return values.pop()
def test_func(something): pass
"""
)
req = item._request
with pytest.raises(pytest.FixtureLookupError):
req.getfixturevalue("notexists")
val = req.getfixturevalue("something")
assert val == 1
val = req.getfixturevalue("something")
assert val == 1
val2 = req.getfixturevalue("other")
assert val2 == 2
val2 = req.getfixturevalue("other") # see about caching
assert val2 == 2
pytest._fillfuncargs(item)
assert item.funcargs["something"] == 1
assert len(get_public_names(item.funcargs)) == 2
assert "request" in item.funcargs
def test_request_addfinalizer(self, testdir):
item = testdir.getitem(
"""
import pytest
teardownlist = []
@pytest.fixture
def something(request):
request.addfinalizer(lambda: teardownlist.append(1))
def test_func(something): pass
"""
)
item.session._setupstate.prepare(item)
pytest._fillfuncargs(item)
# successively check finalization calls
teardownlist = item.getparent(pytest.Module).obj.teardownlist
ss = item.session._setupstate
assert not teardownlist
ss.teardown_exact(item, None)
print(ss.stack)
assert teardownlist == [1]
def test_request_addfinalizer_failing_setup(self, testdir):
testdir.makepyfile(
"""
import pytest
values = [1]
@pytest.fixture
def myfix(request):
request.addfinalizer(values.pop)
assert 0
def test_fix(myfix):
pass
def test_finalizer_ran():
assert not values
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(failed=1, passed=1)
def test_request_addfinalizer_failing_setup_module(self, testdir):
testdir.makepyfile(
"""
import pytest
values = [1, 2]
@pytest.fixture(scope="module")
def myfix(request):
request.addfinalizer(values.pop)
request.addfinalizer(values.pop)
assert 0
def test_fix(myfix):
pass
"""
)
reprec = testdir.inline_run("-s")
mod = reprec.getcalls("pytest_runtest_setup")[0].item.module
assert not mod.values
def test_request_addfinalizer_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture
def something(request):
request.addfinalizer(lambda: values.append(None))
def test_func(something, missingarg):
pass
def test_second():
assert len(values) == 1
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*1 error*"] # XXX the whole module collection fails
)
def test_request_subrequest_addfinalizer_exceptions(self, testdir):
"""
Ensure exceptions raised during teardown by a finalizer are suppressed
until all finalizers are called, re-raising the first exception (#2440)
"""
testdir.makepyfile(
"""
import pytest
values = []
def _excepts(where):
raise Exception('Error in %s fixture' % where)
@pytest.fixture
def subrequest(request):
return request
@pytest.fixture
def something(subrequest):
subrequest.addfinalizer(lambda: values.append(1))
subrequest.addfinalizer(lambda: values.append(2))
subrequest.addfinalizer(lambda: _excepts('something'))
@pytest.fixture
def excepts(subrequest):
subrequest.addfinalizer(lambda: _excepts('excepts'))
subrequest.addfinalizer(lambda: values.append(3))
def test_first(something, excepts):
pass
def test_second():
assert values == [3, 2, 1]
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*Exception: Error in excepts fixture", "* 2 passed, 1 error in *"]
)
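# --- illustrative sketch, not part of the original test file ---
# A minimal model of the contract asserted above: finalizers run in
# reverse registration order, all of them run even if earlier ones raise,
# and the first exception observed is re-raised at the end:
def _sketch_run_finalizers(finalizers):
    first_exc = None
    for fin in reversed(finalizers):
        try:
            fin()
        except Exception as exc:
            if first_exc is None:
                first_exc = exc
    if first_exc is not None:
        raise first_exc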
def test_request_getmodulepath(self, testdir):
modcol = testdir.getmodulecol("def test_somefunc(): pass")
(item,) = testdir.genitems([modcol])
req = fixtures.FixtureRequest(item)
assert req.fspath == modcol.fspath
def test_request_fixturenames(self, testdir):
testdir.makepyfile(
"""
import pytest
from _pytest.pytester import get_public_names
@pytest.fixture()
def arg1():
pass
@pytest.fixture()
def farg(arg1):
pass
@pytest.fixture(autouse=True)
def sarg(tmpdir):
pass
def test_function(request, farg):
assert set(get_public_names(request.fixturenames)) == \
set(["tmpdir", "sarg", "arg1", "request", "farg",
"tmp_path", "tmp_path_factory"])
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_request_fixturenames_dynamic_fixture(self, testdir):
"""Regression test for #3057"""
testdir.copy_example("fixtures/test_getfixturevalue_dynamic.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_funcargnames_compatattr(self, testdir):
testdir.makepyfile(
"""
import pytest
def pytest_generate_tests(metafunc):
with pytest.warns(pytest.PytestDeprecationWarning):
assert metafunc.funcargnames == metafunc.fixturenames
@pytest.fixture
def fn(request):
with pytest.warns(pytest.PytestDeprecationWarning):
assert request._pyfuncitem.funcargnames == \
request._pyfuncitem.fixturenames
with pytest.warns(pytest.PytestDeprecationWarning):
return request.funcargnames, request.fixturenames
def test_hello(fn):
assert fn[0] == fn[1]
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setupdecorator_and_xunit(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope='module', autouse=True)
def setup_module():
values.append("module")
@pytest.fixture(autouse=True)
def setup_function():
values.append("function")
def test_func():
pass
class TestClass(object):
@pytest.fixture(scope="class", autouse=True)
def setup_class(self):
values.append("class")
@pytest.fixture(autouse=True)
def setup_method(self):
values.append("method")
def test_method(self):
pass
def test_all():
assert values == ["module", "function", "class",
"function", "method", "function"]
"""
)
reprec = testdir.inline_run("-v")
reprec.assertoutcome(passed=3)
def test_fixtures_sub_subdir_normalize_sep(self, testdir):
# this tests that normalization of nodeids takes place
b = testdir.mkdir("tests").mkdir("unit")
b.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def arg1():
pass
"""
)
)
p = b.join("test_module.py")
p.write("def test_func(arg1): pass")
result = testdir.runpytest(p, "--fixtures")
assert result.ret == 0
result.stdout.fnmatch_lines(
"""
*fixtures defined*conftest*
*arg1*
"""
)
def test_show_fixtures_color_yes(self, testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("--color=yes", "--fixtures")
assert "\x1b[32mtmpdir" in result.stdout.str()
def test_newstyle_with_request(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture()
def arg(request):
pass
def test_1(arg):
pass
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setupcontext_no_param(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1,2])
def arg(request):
return request.param
@pytest.fixture(autouse=True)
def mysetup(request, arg):
assert not hasattr(request, "param")
def test_1(arg):
assert arg in (1,2)
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
class TestRequestMarking:
def test_applymarker(self, testdir):
item1, item2 = testdir.getitems(
"""
import pytest
@pytest.fixture
def something(request):
pass
class TestClass(object):
def test_func1(self, something):
pass
def test_func2(self, something):
pass
"""
)
req1 = fixtures.FixtureRequest(item1)
assert "xfail" not in item1.keywords
req1.applymarker(pytest.mark.xfail)
assert "xfail" in item1.keywords
assert "skipif" not in item1.keywords
req1.applymarker(pytest.mark.skipif)
assert "skipif" in item1.keywords
with pytest.raises(ValueError):
req1.applymarker(42)
def test_accesskeywords(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture()
def keywords(request):
return request.keywords
@pytest.mark.XYZ
def test_function(keywords):
assert keywords["XYZ"]
assert "abc" not in keywords
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_accessmarker_dynamic(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture()
def keywords(request):
return request.keywords
@pytest.fixture(scope="class", autouse=True)
def marking(request):
request.applymarker(pytest.mark.XYZ("hello"))
"""
)
testdir.makepyfile(
"""
import pytest
def test_fun1(keywords):
assert keywords["XYZ"] is not None
assert "abc" not in keywords
def test_fun2(keywords):
assert keywords["XYZ"] is not None
assert "abc" not in keywords
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
class TestFixtureUsages:
def test_noargfixturedec(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def arg1():
return 1
def test_func(arg1):
assert arg1 == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_receives_funcargs(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture()
def arg1():
return 1
@pytest.fixture()
def arg2(arg1):
return arg1 + 1
def test_add(arg2):
assert arg2 == 2
def test_all(arg1, arg2):
assert arg1 == 1
assert arg2 == 2
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_receives_funcargs_scope_mismatch(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="function")
def arg1():
return 1
@pytest.fixture(scope="module")
def arg2(arg1):
return arg1 + 1
def test_add(arg2):
assert arg2 == 2
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ScopeMismatch*involved factories*",
"test_receives_funcargs_scope_mismatch.py:6: def arg2(arg1)",
"test_receives_funcargs_scope_mismatch.py:2: def arg1()",
"*1 error*",
]
)
def test_receives_funcargs_scope_mismatch_issue660(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="function")
def arg1():
return 1
@pytest.fixture(scope="module")
def arg2(arg1):
return arg1 + 1
def test_add(arg1, arg2):
assert arg2 == 2
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*ScopeMismatch*involved factories*", "* def arg2*", "*1 error*"]
)
def test_invalid_scope(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="functions")
def badscope():
pass
def test_nothing(badscope):
pass
"""
)
result = testdir.runpytest_inprocess()
result.stdout.fnmatch_lines(
"*Fixture 'badscope' from test_invalid_scope.py got an unexpected scope value 'functions'"
)
@pytest.mark.parametrize("scope", ["function", "session"])
def test_parameters_without_eq_semantics(self, scope, testdir):
testdir.makepyfile(
"""
class NoEq1: # fails on `a == b` statement
def __eq__(self, _):
raise RuntimeError
class NoEq2: # fails on `if a == b:` statement
def __eq__(self, _):
class NoBool:
def __bool__(self):
raise RuntimeError
return NoBool()
import pytest
@pytest.fixture(params=[NoEq1(), NoEq2()], scope={scope!r})
def no_eq(request):
return request.param
def test1(no_eq):
pass
def test2(no_eq):
pass
""".format(
scope=scope
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*4 passed*"])
def test_funcarg_parametrized_and_used_twice(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(params=[1,2])
def arg1(request):
values.append(1)
return request.param
@pytest.fixture()
def arg2(arg1):
return arg1 + 1
def test_add(arg1, arg2):
assert arg2 == arg1 + 1
assert len(values) == arg1
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*2 passed*"])
def test_factory_uses_unknown_funcarg_as_dependency_error(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture()
def fail(missing):
return
@pytest.fixture()
def call_fail(fail):
return
def test_missing(call_fail):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*pytest.fixture()*
*def call_fail(fail)*
*pytest.fixture()*
*def fail*
*fixture*'missing'*not found*
"""
)
def test_factory_setup_as_classes_fails(self, testdir):
testdir.makepyfile(
"""
import pytest
class arg1(object):
def __init__(self, request):
self.x = 1
arg1 = pytest.fixture()(arg1)
"""
)
reprec = testdir.inline_run()
values = reprec.getfailedcollections()
assert len(values) == 1
def test_usefixtures_marker(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="class")
def myfix(request):
request.cls.hello = "world"
values.append(1)
class TestClass(object):
def test_one(self):
assert self.hello == "world"
assert len(values) == 1
def test_two(self):
assert self.hello == "world"
assert len(values) == 1
pytest.mark.usefixtures("myfix")(TestClass)
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_usefixtures_ini(self, testdir):
testdir.makeini(
"""
[pytest]
usefixtures = myfix
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.fixture(scope="class")
def myfix(request):
request.cls.hello = "world"
"""
)
testdir.makepyfile(
"""
class TestClass(object):
def test_one(self):
assert self.hello == "world"
def test_two(self):
assert self.hello == "world"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_usefixtures_seen_in_showmarkers(self, testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(
"""
*usefixtures(fixturename1*mark tests*fixtures*
"""
)
def test_request_instance_issue203(self, testdir):
testdir.makepyfile(
"""
import pytest
class TestClass(object):
@pytest.fixture
def setup1(self, request):
assert self == request.instance
self.arg1 = 1
def test_hello(self, setup1):
assert self.arg1 == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_fixture_parametrized_with_iterator(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
def f():
yield 1
yield 2
dec = pytest.fixture(scope="module", params=f())
@dec
def arg(request):
return request.param
@dec
def arg2(request):
return request.param
def test_1(arg):
values.append(arg)
def test_2(arg2):
values.append(arg2*10)
"""
)
reprec = testdir.inline_run("-v")
reprec.assertoutcome(passed=4)
values = reprec.getcalls("pytest_runtest_call")[0].item.module.values
assert values == [1, 2, 10, 20]
def test_setup_functions_as_fixtures(self, testdir):
"""Ensure setup_* methods obey fixture scope rules (#517, #3094)."""
testdir.makepyfile(
"""
import pytest
DB_INITIALIZED = None
@pytest.yield_fixture(scope="session", autouse=True)
def db():
global DB_INITIALIZED
DB_INITIALIZED = True
yield
DB_INITIALIZED = False
def setup_module():
assert DB_INITIALIZED
def teardown_module():
assert DB_INITIALIZED
class TestClass(object):
def setup_method(self, method):
assert DB_INITIALIZED
def teardown_method(self, method):
assert DB_INITIALIZED
def test_printer_1(self):
pass
def test_printer_2(self):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 2 passed in *"])
class TestFixtureManagerParseFactories:
@pytest.fixture
def testdir(self, request):
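        """Return the default ``testdir`` preloaded with a conftest defining
        the ``hello``, ``fm`` (fixture manager) and ``item`` fixtures."""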
testdir = request.getfixturevalue("testdir")
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def hello(request):
return "conftest"
@pytest.fixture
def fm(request):
return request._fixturemanager
@pytest.fixture
def item(request):
return request._pyfuncitem
"""
)
return testdir
def test_parsefactories_evil_objects_issue214(self, testdir):
testdir.makepyfile(
"""
class A(object):
def __call__(self):
pass
def __getattr__(self, name):
raise RuntimeError()
a = A()
def test_hello():
pass
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1, failed=0)
def test_parsefactories_conftest(self, testdir):
testdir.makepyfile(
"""
def test_hello(item, fm):
for name in ("fm", "hello", "item"):
faclist = fm.getfixturedefs(name, item.nodeid)
assert len(faclist) == 1
fac = faclist[0]
assert fac.func.__name__ == name
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=1)
def test_parsefactories_conftest_and_module_and_class(self, testdir):
testdir.makepyfile(
"""\
import pytest
@pytest.fixture
def hello(request):
return "module"
class TestClass(object):
@pytest.fixture
def hello(self, request):
return "class"
def test_hello(self, item, fm):
faclist = fm.getfixturedefs("hello", item.nodeid)
print(faclist)
assert len(faclist) == 3
assert faclist[0].func(item._request) == "conftest"
assert faclist[1].func(item._request) == "module"
assert faclist[2].func(item._request) == "class"
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=1)
def test_parsefactories_relative_node_ids(self, testdir):
# example mostly taken from:
# https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
runner = testdir.mkdir("runner")
package = testdir.mkdir("package")
package.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def one():
return 1
"""
)
)
package.join("test_x.py").write(
textwrap.dedent(
"""\
def test_x(one):
assert one == 1
"""
)
)
sub = package.mkdir("sub")
sub.join("__init__.py").ensure()
sub.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def one():
return 2
"""
)
)
sub.join("test_y.py").write(
textwrap.dedent(
"""\
def test_x(one):
assert one == 2
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
with runner.as_cwd():
reprec = testdir.inline_run("..")
reprec.assertoutcome(passed=2)
def test_package_xunit_fixture(self, testdir):
testdir.makepyfile(
__init__="""\
values = []
"""
)
package = testdir.mkdir("package")
package.join("__init__.py").write(
textwrap.dedent(
"""\
from .. import values
def setup_module():
values.append("package")
def teardown_module():
values[:] = []
"""
)
)
package.join("test_x.py").write(
textwrap.dedent(
"""\
from .. import values
def test_x():
assert values == ["package"]
"""
)
)
package = testdir.mkdir("package2")
package.join("__init__.py").write(
textwrap.dedent(
"""\
from .. import values
def setup_module():
values.append("package2")
def teardown_module():
values[:] = []
"""
)
)
package.join("test_x.py").write(
textwrap.dedent(
"""\
from .. import values
def test_x():
assert values == ["package2"]
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_package_fixture_complex(self, testdir):
testdir.makepyfile(
__init__="""\
values = []
"""
)
testdir.syspathinsert(testdir.tmpdir.dirname)
package = testdir.mkdir("package")
package.join("__init__.py").write("")
package.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
from .. import values
@pytest.fixture(scope="package")
def one():
values.append("package")
yield values
values.pop()
@pytest.fixture(scope="package", autouse=True)
def two():
values.append("package-auto")
yield values
values.pop()
"""
)
)
package.join("test_x.py").write(
textwrap.dedent(
"""\
from .. import values
def test_package_autouse():
assert values == ["package-auto"]
def test_package(one):
assert values == ["package-auto", "package"]
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_collect_custom_items(self, testdir):
testdir.copy_example("fixtures/custom_item")
result = testdir.runpytest("foo")
result.stdout.fnmatch_lines(["*passed*"])
class TestAutouseDiscovery:
@pytest.fixture
def testdir(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True)
def perfunction(request, tmpdir):
pass
@pytest.fixture()
def arg1(tmpdir):
pass
@pytest.fixture(autouse=True)
def perfunction2(arg1):
pass
@pytest.fixture
def fm(request):
return request._fixturemanager
@pytest.fixture
def item(request):
return request._pyfuncitem
"""
)
return testdir
def test_parsefactories_conftest(self, testdir):
testdir.makepyfile(
"""
from _pytest.pytester import get_public_names
def test_check_setup(item, fm):
autousenames = fm._getautousenames(item.nodeid)
assert len(get_public_names(autousenames)) == 2
assert "perfunction2" in autousenames
assert "perfunction" in autousenames
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=1)
def test_two_classes_separated_autouse(self, testdir):
testdir.makepyfile(
"""
import pytest
class TestA(object):
values = []
@pytest.fixture(autouse=True)
def setup1(self):
self.values.append(1)
def test_setup1(self):
assert self.values == [1]
class TestB(object):
values = []
@pytest.fixture(autouse=True)
def setup2(self):
self.values.append(1)
def test_setup2(self):
assert self.values == [1]
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_setup_at_classlevel(self, testdir):
testdir.makepyfile(
"""
import pytest
class TestClass(object):
@pytest.fixture(autouse=True)
def permethod(self, request):
request.instance.funcname = request.function.__name__
def test_method1(self):
assert self.funcname == "test_method1"
def test_method2(self):
assert self.funcname == "test_method2"
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=2)
@pytest.mark.xfail(reason="'enabled' feature not implemented")
def test_setup_enabled_functionnode(self, testdir):
testdir.makepyfile(
"""
import pytest
def enabled(parentnode, markers):
return "needsdb" in markers
@pytest.fixture(params=[1,2])
def db(request):
return request.param
@pytest.fixture(enabled=enabled, autouse=True)
def createdb(db):
pass
def test_func1(request):
assert "db" not in request.fixturenames
@pytest.mark.needsdb
def test_func2(request):
assert "db" in request.fixturenames
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=2)
def test_callables_nocode(self, testdir):
"""
        An imported ``mock.call`` would break setup/factory discovery
        due to it being callable and ``__code__`` not being a code object.
"""
testdir.makepyfile(
"""
class _call(tuple):
def __call__(self, *k, **kw):
pass
def __getattr__(self, k):
return self
call = _call()
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(failed=0, passed=0)
def test_autouse_in_conftests(self, testdir):
a = testdir.mkdir("a")
b = testdir.mkdir("a1")
conftest = testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True)
def hello():
xxx
"""
)
conftest.move(a.join(conftest.basename))
a.join("test_something.py").write("def test_func(): pass")
b.join("test_otherthing.py").write("def test_func(): pass")
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*1 passed*1 error*
"""
)
def test_autouse_in_module_and_two_classes(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(autouse=True)
def append1():
values.append("module")
def test_x():
assert values == ["module"]
class TestA(object):
@pytest.fixture(autouse=True)
def append2(self):
values.append("A")
def test_hello(self):
assert values == ["module", "module", "A"], values
class TestA2(object):
def test_world(self):
assert values == ["module", "module", "A", "module"], values
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=3)
class TestAutouseManagement:
def test_autouse_conftest_mid_directory(self, testdir):
pkgdir = testdir.mkpydir("xyz123")
pkgdir.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture(autouse=True)
def app():
import sys
sys._myapp = "hello"
"""
)
)
t = pkgdir.ensure("tests", "test_app.py")
t.write(
textwrap.dedent(
"""\
import sys
def test_app():
assert sys._myapp == "hello"
"""
)
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=1)
def test_funcarg_and_setup(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="module")
def arg():
values.append(1)
return 0
@pytest.fixture(scope="module", autouse=True)
def something(arg):
values.append(2)
def test_hello(arg):
assert len(values) == 2
assert values == [1,2]
assert arg == 0
def test_hello2(arg):
assert len(values) == 2
assert values == [1,2]
assert arg == 0
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_uses_parametrized_resource(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(params=[1,2])
def arg(request):
return request.param
@pytest.fixture(autouse=True)
def something(arg):
values.append(arg)
def test_hello():
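                # Runs once per ``arg`` param thanks to the autouse fixture;
                # any unexpected state hits the deliberate ZeroDivisionError.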
if len(values) == 1:
assert values == [1]
elif len(values) == 2:
assert values == [1, 2]
else:
0/0
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=2)
def test_session_parametrized_function(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="session", params=[1,2])
def arg(request):
return request.param
@pytest.fixture(scope="function", autouse=True)
def append(request, arg):
if request.function.__name__ == "test_some":
values.append(arg)
def test_some():
pass
def test_result(arg):
assert len(values) == arg
assert values[:arg] == [1,2][:arg]
"""
)
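        # 2 session params x 2 test functions = 4 runs; ``values`` grows only
        # when ``test_some`` executes under a new session param.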
reprec = testdir.inline_run("-v", "-s")
reprec.assertoutcome(passed=4)
def test_class_function_parametrization_finalization(self, testdir):
p = testdir.makeconftest(
"""
import pytest
import pprint
values = []
@pytest.fixture(scope="function", params=[1,2])
def farg(request):
return request.param
@pytest.fixture(scope="class", params=list("ab"))
def carg(request):
return request.param
@pytest.fixture(scope="function", autouse=True)
def append(request, farg, carg):
def fin():
values.append("fin_%s%s" % (carg, farg))
request.addfinalizer(fin)
"""
)
testdir.makepyfile(
"""
import pytest
class TestClass(object):
def test_1(self):
pass
class TestClass2(object):
def test_2(self):
pass
"""
)
confcut = "--confcutdir={}".format(testdir.tmpdir)
reprec = testdir.inline_run("-v", "-s", confcut)
reprec.assertoutcome(passed=8)
config = reprec.getcalls("pytest_unconfigure")[0].config
values = config.pluginmanager._getconftestmodules(p)[0].values
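        # Two test classes, each exercising 2 class params x 2 function
        # params; function-scoped finalizers run before the class param
        # advances.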
assert values == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2
def test_scope_ordering(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="function", autouse=True)
def fappend2():
values.append(2)
@pytest.fixture(scope="class", autouse=True)
def classappend3():
values.append(3)
@pytest.fixture(scope="module", autouse=True)
def mappend():
values.append(1)
class TestHallo(object):
def test_method(self):
assert values == [1,3,2]
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_parametrization_setup_teardown_ordering(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
def pytest_generate_tests(metafunc):
if metafunc.cls is None:
assert metafunc.function is test_finish
if metafunc.cls is not None:
metafunc.parametrize("item", [1,2], scope="class")
class TestClass(object):
@pytest.fixture(scope="class", autouse=True)
def addteardown(self, item, request):
values.append("setup-%d" % item)
request.addfinalizer(lambda: values.append("teardown-%d" % item))
def test_step1(self, item):
values.append("step1-%d" % item)
def test_step2(self, item):
values.append("step2-%d" % item)
def test_finish():
print(values)
assert values == ["setup-1", "step1-1", "step2-1", "teardown-1",
"setup-2", "step1-2", "step2-2", "teardown-2",]
"""
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=5)
def test_ordering_autouse_before_explicit(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(autouse=True)
def fix1():
values.append(1)
@pytest.fixture()
def arg1():
values.append(2)
def test_hello(arg1):
assert values == [1,2]
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00", "p01"])
@pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10", "p11"])
def test_ordering_dependencies_torndown_first(self, testdir, param1, param2):
"""#226"""
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(%(param1)s)
def arg1(request):
request.addfinalizer(lambda: values.append("fin1"))
values.append("new1")
@pytest.fixture(%(param2)s)
def arg2(request, arg1):
request.addfinalizer(lambda: values.append("fin2"))
values.append("new2")
def test_arg(arg2):
pass
def test_check():
assert values == ["new1", "new2", "fin2", "fin1"]
"""
% locals()
)
reprec = testdir.inline_run("-s")
reprec.assertoutcome(passed=2)
class TestFixtureMarker:
def test_parametrize(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=["a", "b", "c"])
def arg(request):
return request.param
values = []
def test_param(arg):
values.append(arg)
def test_result():
assert values == list("abc")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=4)
def test_multiple_parametrization_issue_736(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1,2,3])
def foo(request):
return request.param
@pytest.mark.parametrize('foobar', [4,5,6])
def test_issue(foo, foobar):
assert foo in [1,2,3]
assert foobar in [4,5,6]
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=9)
@pytest.mark.parametrize(
"param_args",
["'fixt, val'", "'fixt,val'", "['fixt', 'val']", "('fixt', 'val')"],
)
def test_override_parametrized_fixture_issue_979(self, testdir, param_args):
"""Make sure a parametrized argument can override a parametrized fixture.
This was a regression introduced in the fix for #736.
"""
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1, 2])
def fixt(request):
return request.param
@pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')])
def test_foo(fixt, val):
pass
"""
% param_args
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_scope_session(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="module")
def arg():
values.append(1)
return 1
def test_1(arg):
assert arg == 1
def test_2(arg):
assert arg == 1
assert len(values) == 1
class TestClass(object):
def test3(self, arg):
assert arg == 1
assert len(values) == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=3)
def test_scope_session_exc(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="session")
def fix():
values.append(1)
pytest.skip('skipping')
def test_1(fix):
pass
def test_2(fix):
pass
def test_last():
assert values == [1]
"""
)
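        # The skipping session fixture runs only once: the skip is cached for
        # the scope, both requesting tests are skipped, and ``values`` records
        # a single setup attempt.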
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=2, passed=1)
def test_scope_session_exc_two_fix(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
m = []
@pytest.fixture(scope="session")
def a():
values.append(1)
pytest.skip('skipping')
@pytest.fixture(scope="session")
def b(a):
m.append(1)
def test_1(b):
pass
def test_2(b):
pass
def test_last():
assert values == [1]
assert m == []
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=2, passed=1)
def test_scope_exc(self, testdir):
testdir.makepyfile(
test_foo="""
def test_foo(fix):
pass
""",
test_bar="""
def test_bar(fix):
pass
""",
conftest="""
import pytest
reqs = []
@pytest.fixture(scope="session")
def fix(request):
reqs.append(1)
pytest.skip()
@pytest.fixture
def req_list():
return reqs
""",
test_real="""
def test_last(req_list):
assert req_list == [1]
""",
)
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=2, passed=1)
def test_scope_module_uses_session(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="module")
def arg():
values.append(1)
return 1
def test_1(arg):
assert arg == 1
def test_2(arg):
assert arg == 1
assert len(values) == 1
class TestClass(object):
def test3(self, arg):
assert arg == 1
assert len(values) == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=3)
def test_scope_module_and_finalizer(self, testdir):
testdir.makeconftest(
"""
import pytest
finalized_list = []
created_list = []
@pytest.fixture(scope="module")
def arg(request):
created_list.append(1)
assert request.scope == "module"
request.addfinalizer(lambda: finalized_list.append(1))
@pytest.fixture
def created(request):
return len(created_list)
@pytest.fixture
def finalized(request):
return len(finalized_list)
"""
)
testdir.makepyfile(
test_mod1="""
def test_1(arg, created, finalized):
assert created == 1
assert finalized == 0
def test_2(arg, created, finalized):
assert created == 1
assert finalized == 0""",
test_mod2="""
def test_3(arg, created, finalized):
assert created == 2
assert finalized == 1""",
test_mode3="""
def test_4(arg, created, finalized):
assert created == 3
assert finalized == 2
""",
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=4)
def test_scope_mismatch_various(self, testdir):
testdir.makeconftest(
"""
import pytest
finalized = []
created = []
@pytest.fixture(scope="function")
def arg(request):
pass
"""
)
testdir.makepyfile(
test_mod1="""
import pytest
@pytest.fixture(scope="session")
def arg(request):
request.getfixturevalue("arg")
def test_1(arg):
pass
"""
)
result = testdir.runpytest()
assert result.ret != 0
result.stdout.fnmatch_lines(
["*ScopeMismatch*You tried*function*session*request*"]
)
def test_dynamic_scope(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_addoption(parser):
parser.addoption("--extend-scope", action="store_true", default=False)
def dynamic_scope(fixture_name, config):
if config.getoption("--extend-scope"):
return "session"
return "function"
@pytest.fixture(scope=dynamic_scope)
def dynamic_fixture(calls=[]):
calls.append("call")
return len(calls)
"""
)
testdir.makepyfile(
"""
def test_first(dynamic_fixture):
assert dynamic_fixture == 1
def test_second(dynamic_fixture):
assert dynamic_fixture == 2
"""
)
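        # Default function scope: the fixture runs for each test, so the
        # persistent ``calls`` list yields 1 then 2 and both tests pass.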
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
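        # Session scope: the fixture runs only once, both tests see 1, and
        # ``test_second`` fails.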
reprec = testdir.inline_run("--extend-scope")
reprec.assertoutcome(passed=1, failed=1)
def test_dynamic_scope_bad_return(self, testdir):
testdir.makepyfile(
"""
import pytest
def dynamic_scope(**_):
return "wrong-scope"
@pytest.fixture(scope=dynamic_scope)
def fixture():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"Fixture 'fixture' from test_dynamic_scope_bad_return.py "
"got an unexpected scope value 'wrong-scope'"
)
def test_register_only_with_mark(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture()
def arg():
return 1
"""
)
testdir.makepyfile(
test_mod1="""
import pytest
@pytest.fixture()
def arg(arg):
return arg + 1
def test_1(arg):
assert arg == 2
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_parametrize_and_scope(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="module", params=["a", "b", "c"])
def arg(request):
return request.param
values = []
def test_param(arg):
values.append(arg)
"""
)
reprec = testdir.inline_run("-v")
reprec.assertoutcome(passed=3)
values = reprec.getcalls("pytest_runtest_call")[0].item.module.values
assert len(values) == 3
assert "a" in values
assert "b" in values
assert "c" in values
def test_scope_mismatch(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture(scope="function")
def arg(request):
pass
"""
)
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="session")
def arg(arg):
pass
def test_mismatch(arg):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*ScopeMismatch*", "*1 error*"])
def test_parametrize_separated_order(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="module", params=[1, 2])
def arg(request):
return request.param
values = []
def test_1(arg):
values.append(arg)
def test_2(arg):
values.append(arg)
"""
)
reprec = testdir.inline_run("-v")
reprec.assertoutcome(passed=4)
values = reprec.getcalls("pytest_runtest_call")[0].item.module.values
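        # Module-scoped params are grouped: both tests run under param 1
        # before param 2 starts.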
assert values == [1, 1, 2, 2]
def test_module_parametrized_ordering(self, testdir):
testdir.makeini(
"""
[pytest]
console_output_style=classic
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.fixture(scope="session", params="s1 s2".split())
def sarg():
pass
@pytest.fixture(scope="module", params="m1 m2".split())
def marg():
pass
"""
)
testdir.makepyfile(
test_mod1="""
def test_func(sarg):
pass
def test_func1(marg):
pass
""",
test_mod2="""
def test_func2(sarg):
pass
def test_func3(sarg, marg):
pass
def test_func3b(sarg, marg):
pass
def test_func4(marg):
pass
""",
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(
"""
test_mod1.py::test_func[s1] PASSED
test_mod2.py::test_func2[s1] PASSED
test_mod2.py::test_func3[s1-m1] PASSED
test_mod2.py::test_func3b[s1-m1] PASSED
test_mod2.py::test_func3[s1-m2] PASSED
test_mod2.py::test_func3b[s1-m2] PASSED
test_mod1.py::test_func[s2] PASSED
test_mod2.py::test_func2[s2] PASSED
test_mod2.py::test_func3[s2-m1] PASSED
test_mod2.py::test_func3b[s2-m1] PASSED
test_mod2.py::test_func4[m1] PASSED
test_mod2.py::test_func3[s2-m2] PASSED
test_mod2.py::test_func3b[s2-m2] PASSED
test_mod2.py::test_func4[m2] PASSED
test_mod1.py::test_func1[m1] PASSED
test_mod1.py::test_func1[m2] PASSED
"""
)
def test_dynamic_parametrized_ordering(self, testdir):
testdir.makeini(
"""
[pytest]
console_output_style=classic
"""
)
testdir.makeconftest(
"""
import pytest
def pytest_configure(config):
class DynamicFixturePlugin(object):
@pytest.fixture(scope='session', params=['flavor1', 'flavor2'])
def flavor(self, request):
return request.param
config.pluginmanager.register(DynamicFixturePlugin(), 'flavor-fixture')
@pytest.fixture(scope='session', params=['vxlan', 'vlan'])
def encap(request):
return request.param
            @pytest.fixture(scope='session', autouse=True)
def reprovision(request, flavor, encap):
pass
"""
)
testdir.makepyfile(
"""
def test(reprovision):
pass
def test2(reprovision):
pass
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(
"""
test_dynamic_parametrized_ordering.py::test[flavor1-vxlan] PASSED
test_dynamic_parametrized_ordering.py::test2[flavor1-vxlan] PASSED
test_dynamic_parametrized_ordering.py::test[flavor2-vxlan] PASSED
test_dynamic_parametrized_ordering.py::test2[flavor2-vxlan] PASSED
test_dynamic_parametrized_ordering.py::test[flavor2-vlan] PASSED
test_dynamic_parametrized_ordering.py::test2[flavor2-vlan] PASSED
test_dynamic_parametrized_ordering.py::test[flavor1-vlan] PASSED
test_dynamic_parametrized_ordering.py::test2[flavor1-vlan] PASSED
"""
)
def test_class_ordering(self, testdir):
testdir.makeini(
"""
[pytest]
console_output_style=classic
"""
)
testdir.makeconftest(
"""
import pytest
values = []
@pytest.fixture(scope="function", params=[1,2])
def farg(request):
return request.param
@pytest.fixture(scope="class", params=list("ab"))
def carg(request):
return request.param
@pytest.fixture(scope="function", autouse=True)
def append(request, farg, carg):
def fin():
values.append("fin_%s%s" % (carg, farg))
request.addfinalizer(fin)
"""
)
testdir.makepyfile(
"""
import pytest
class TestClass2(object):
def test_1(self):
pass
def test_2(self):
pass
class TestClass(object):
def test_3(self):
pass
"""
)
result = testdir.runpytest("-vs")
result.stdout.re_match_lines(
r"""
test_class_ordering.py::TestClass2::test_1\[a-1\] PASSED
test_class_ordering.py::TestClass2::test_1\[a-2\] PASSED
test_class_ordering.py::TestClass2::test_2\[a-1\] PASSED
test_class_ordering.py::TestClass2::test_2\[a-2\] PASSED
test_class_ordering.py::TestClass2::test_1\[b-1\] PASSED
test_class_ordering.py::TestClass2::test_1\[b-2\] PASSED
test_class_ordering.py::TestClass2::test_2\[b-1\] PASSED
test_class_ordering.py::TestClass2::test_2\[b-2\] PASSED
test_class_ordering.py::TestClass::test_3\[a-1\] PASSED
test_class_ordering.py::TestClass::test_3\[a-2\] PASSED
test_class_ordering.py::TestClass::test_3\[b-1\] PASSED
test_class_ordering.py::TestClass::test_3\[b-2\] PASSED
"""
)
def test_parametrize_separated_order_higher_scope_first(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="function", params=[1, 2])
def arg(request):
param = request.param
request.addfinalizer(lambda: values.append("fin:%s" % param))
values.append("create:%s" % param)
return request.param
@pytest.fixture(scope="module", params=["mod1", "mod2"])
def modarg(request):
param = request.param
request.addfinalizer(lambda: values.append("fin:%s" % param))
values.append("create:%s" % param)
return request.param
values = []
def test_1(arg):
values.append("test1")
def test_2(modarg):
values.append("test2")
def test_3(arg, modarg):
values.append("test3")
def test_4(modarg, arg):
values.append("test4")
"""
)
reprec = testdir.inline_run("-v")
reprec.assertoutcome(passed=12)
values = reprec.getcalls("pytest_runtest_call")[0].item.module.values
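        # Function-scoped ``arg`` is created and finalized around every test,
        # while module-scoped ``modarg`` stays alive across all tests using
        # it; mod1 is fully finalized before mod2 is created.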
expected = [
"create:1",
"test1",
"fin:1",
"create:2",
"test1",
"fin:2",
"create:mod1",
"test2",
"create:1",
"test3",
"fin:1",
"create:2",
"test3",
"fin:2",
"create:1",
"test4",
"fin:1",
"create:2",
"test4",
"fin:2",
"fin:mod1",
"create:mod2",
"test2",
"create:1",
"test3",
"fin:1",
"create:2",
"test3",
"fin:2",
"create:1",
"test4",
"fin:1",
"create:2",
"test4",
"fin:2",
"fin:mod2",
]
import pprint
pprint.pprint(list(zip(values, expected)))
assert values == expected
def test_parametrized_fixture_teardown_order(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1,2], scope="class")
def param1(request):
return request.param
values = []
class TestClass(object):
@classmethod
@pytest.fixture(scope="class", autouse=True)
def setup1(self, request, param1):
values.append(1)
request.addfinalizer(self.teardown1)
@classmethod
def teardown1(self):
assert values.pop() == 1
@pytest.fixture(scope="class", autouse=True)
def setup2(self, request, param1):
values.append(2)
request.addfinalizer(self.teardown2)
@classmethod
def teardown2(self):
assert values.pop() == 2
def test(self):
pass
def test_finish():
assert not values
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(
"""
*3 passed*
"""
)
result.stdout.no_fnmatch_line("*error*")
def test_fixture_finalizer(self, testdir):
testdir.makeconftest(
"""
import pytest
import sys
@pytest.fixture
def browser(request):
def finalize():
sys.stdout.write('Finalized')
request.addfinalizer(finalize)
return {}
"""
)
b = testdir.mkdir("subdir")
b.join("test_overridden_fixture_finalizer.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def browser(browser):
browser['visited'] = True
return browser
def test_browser(browser):
assert browser['visited'] is True
"""
)
)
reprec = testdir.runpytest("-s")
for test in ["test_browser"]:
reprec.stdout.fnmatch_lines(["*Finalized*"])
def test_class_scope_with_normal_tests(self, testdir):
testpath = testdir.makepyfile(
"""
import pytest
class Box(object):
value = 0
@pytest.fixture(scope='class')
def a(request):
Box.value += 1
return Box.value
def test_a(a):
assert a == 1
class Test1(object):
def test_b(self, a):
assert a == 2
class Test2(object):
def test_c(self, a):
assert a == 3"""
)
reprec = testdir.inline_run(testpath)
for test in ["test_a", "test_b", "test_c"]:
assert reprec.matchreport(test).passed
def test_request_is_clean(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(params=[1, 2])
def fix(request):
request.addfinalizer(lambda: values.append(request.param))
def test_fix(fix):
pass
"""
)
reprec = testdir.inline_run("-s")
values = reprec.getcalls("pytest_runtest_call")[0].item.module.values
assert values == [1, 2]
def test_parametrize_separated_lifecycle(self, testdir):
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope="module", params=[1, 2])
def arg(request):
x = request.param
request.addfinalizer(lambda: values.append("fin%s" % x))
return request.param
def test_1(arg):
values.append(arg)
def test_2(arg):
values.append(arg)
"""
)
reprec = testdir.inline_run("-vs")
reprec.assertoutcome(passed=4)
values = reprec.getcalls("pytest_runtest_call")[0].item.module.values
import pprint
pprint.pprint(values)
# assert len(values) == 6
assert values[0] == values[1] == 1
assert values[2] == "fin1"
assert values[3] == values[4] == 2
assert values[5] == "fin2"
def test_parametrize_function_scoped_finalizers_called(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="function", params=[1, 2])
def arg(request):
x = request.param
request.addfinalizer(lambda: values.append("fin%s" % x))
return request.param
values = []
def test_1(arg):
values.append(arg)
def test_2(arg):
values.append(arg)
def test_3():
assert len(values) == 8
assert values == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"]
"""
)
reprec = testdir.inline_run("-v")
reprec.assertoutcome(passed=5)
@pytest.mark.parametrize("scope", ["session", "function", "module"])
def test_finalizer_order_on_parametrization(self, scope, testdir):
"""#246"""
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(scope=%(scope)r, params=["1"])
def fix1(request):
return request.param
@pytest.fixture(scope=%(scope)r)
def fix2(request, base):
def cleanup_fix2():
assert not values, "base should not have been finalized"
request.addfinalizer(cleanup_fix2)
@pytest.fixture(scope=%(scope)r)
def base(request, fix1):
def cleanup_base():
values.append("fin_base")
print("finalizing base")
request.addfinalizer(cleanup_base)
def test_begin():
pass
def test_baz(base, fix2):
pass
def test_other():
pass
"""
% {"scope": scope}
)
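        # ``fix2`` must be torn down before ``base``: its finalizer asserts
        # that ``fin_base`` has not yet been appended.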
reprec = testdir.inline_run("-lvs")
reprec.assertoutcome(passed=3)
def test_class_scope_parametrization_ordering(self, testdir):
"""#396"""
testdir.makepyfile(
"""
import pytest
values = []
@pytest.fixture(params=["John", "Doe"], scope="class")
def human(request):
request.addfinalizer(lambda: values.append("fin %s" % request.param))
return request.param
class TestGreetings(object):
def test_hello(self, human):
values.append("test_hello")
class TestMetrics(object):
def test_name(self, human):
values.append("test_name")
def test_population(self, human):
values.append("test_population")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=6)
values = reprec.getcalls("pytest_runtest_call")[0].item.module.values
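        # The class-scoped ``human`` fixture is finalized each time a class is
        # left or the param changes, as the interleaved "fin" entries show.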
assert values == [
"test_hello",
"fin John",
"test_hello",
"fin Doe",
"test_name",
"test_population",
"fin John",
"test_name",
"test_population",
"fin Doe",
]
def test_parametrize_setup_function(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="module", params=[1, 2])
def arg(request):
return request.param
@pytest.fixture(scope="module", autouse=True)
def mysetup(request, arg):
request.addfinalizer(lambda: values.append("fin%s" % arg))
values.append("setup%s" % arg)
values = []
def test_1(arg):
values.append(arg)
def test_2(arg):
values.append(arg)
            def test_3(arg):
                # Request ``arg`` explicitly: without it the name would refer
                # to the module-level fixture function object and neither
                # branch below would ever run.
                import pprint
                pprint.pprint(values)
                if arg == 1:
                    assert values == ["setup1", 1, 1, ]
                elif arg == 2:
                    assert values == ["setup1", 1, 1, "fin1",
                                      "setup2", 2, 2, ]
"""
)
reprec = testdir.inline_run("-v")
reprec.assertoutcome(passed=6)
def test_fixture_marked_function_not_collected_as_test(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def test_app():
return 1
def test_something(test_app):
assert test_app == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_params_and_ids(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[object(), object()],
ids=['alpha', 'beta'])
def fix(request):
return request.param
def test_foo(fix):
assert 1
"""
)
res = testdir.runpytest("-v")
res.stdout.fnmatch_lines(["*test_foo*alpha*", "*test_foo*beta*"])
def test_params_and_ids_yieldfixture(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.yield_fixture(params=[object(), object()],
ids=['alpha', 'beta'])
def fix(request):
yield request.param
def test_foo(fix):
assert 1
"""
)
res = testdir.runpytest("-v")
res.stdout.fnmatch_lines(["*test_foo*alpha*", "*test_foo*beta*"])
def test_deterministic_fixture_collection(self, testdir, monkeypatch):
"""#920"""
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="module",
params=["A",
"B",
"C"])
def A(request):
return request.param
@pytest.fixture(scope="module",
params=["DDDDDDDDD", "EEEEEEEEEEEE", "FFFFFFFFFFF", "banansda"])
def B(request, A):
return request.param
def test_foo(B):
                # Something funky is going on here.  Despite the hash seeds
                # set below, we sometimes got unexpected passes depending on
                # what was collected; hashing B seems to help.
assert hash(B) or True
"""
)
monkeypatch.setenv("PYTHONHASHSEED", "1")
out1 = testdir.runpytest_subprocess("-v")
monkeypatch.setenv("PYTHONHASHSEED", "2")
out2 = testdir.runpytest_subprocess("-v")
out1 = [
line
for line in out1.outlines
if line.startswith("test_deterministic_fixture_collection.py::test_foo")
]
out2 = [
line
for line in out2.outlines
if line.startswith("test_deterministic_fixture_collection.py::test_foo")
]
assert len(out1) == 12
assert out1 == out2
class TestRequestScopeAccess:
pytestmark = pytest.mark.parametrize(
("scope", "ok", "error"),
[
["session", "", "fspath class function module"],
["module", "module fspath", "cls function"],
["class", "module fspath cls", "function"],
["function", "module fspath cls function", ""],
],
)
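    # For each scope, "ok" lists the request attributes that must exist and
    # "error" lists those that must raise AttributeError.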
def test_setup(self, testdir, scope, ok, error):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope=%r, autouse=True)
def myscoped(request):
for x in %r:
assert hasattr(request, x)
for x in %r:
pytest.raises(AttributeError, lambda:
getattr(request, x))
assert request.session
assert request.config
def test_func():
pass
"""
% (scope, ok.split(), error.split())
)
reprec = testdir.inline_run("-l")
reprec.assertoutcome(passed=1)
def test_funcarg(self, testdir, scope, ok, error):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope=%r)
def arg(request):
for x in %r:
assert hasattr(request, x)
for x in %r:
pytest.raises(AttributeError, lambda:
getattr(request, x))
assert request.session
assert request.config
def test_func(arg):
pass
"""
% (scope, ok.split(), error.split())
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
class TestErrors:
def test_subfactory_missing_funcarg(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture()
def gen(qwe123):
return 1
def test_something(gen):
pass
"""
)
result = testdir.runpytest()
assert result.ret != 0
result.stdout.fnmatch_lines(
["*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*"]
)
def test_issue498_fixture_finalizer_failing(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def fix1(request):
def f():
raise KeyError
request.addfinalizer(f)
return object()
values = []
def test_1(fix1):
values.append(fix1)
def test_2(fix1):
values.append(fix1)
def test_3():
assert values[0] != values[1]
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*ERROR*teardown*test_1*
*KeyError*
*ERROR*teardown*test_2*
*KeyError*
*3 pass*2 errors*
"""
)
def test_setupfunc_missing_funcarg(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(autouse=True)
def gen(qwe123):
return 1
def test_something():
pass
"""
)
result = testdir.runpytest()
assert result.ret != 0
result.stdout.fnmatch_lines(
["*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*"]
)
class TestShowFixtures:
def test_funcarg_compat(self, testdir):
config = testdir.parseconfigure("--funcargs")
assert config.option.showfixtures
def test_show_fixtures(self, testdir):
result = testdir.runpytest("--fixtures")
result.stdout.fnmatch_lines(
[
"tmpdir_factory [[]session scope[]]",
"*for the test session*",
"tmpdir",
"*temporary directory*",
]
)
def test_show_fixtures_verbose(self, testdir):
result = testdir.runpytest("--fixtures", "-v")
result.stdout.fnmatch_lines(
[
"tmpdir_factory [[]session scope[]] -- *tmpdir.py*",
"*for the test session*",
"tmpdir -- *tmpdir.py*",
"*temporary directory*",
]
)
def test_show_fixtures_testmodule(self, testdir):
p = testdir.makepyfile(
'''
import pytest
@pytest.fixture
def _arg0():
""" hidden """
@pytest.fixture
def arg1():
""" hello world """
'''
)
result = testdir.runpytest("--fixtures", p)
result.stdout.fnmatch_lines(
"""
*tmpdir
*fixtures defined from*
*arg1*
*hello world*
"""
)
result.stdout.no_fnmatch_line("*arg0*")
@pytest.mark.parametrize("testmod", [True, False])
def test_show_fixtures_conftest(self, testdir, testmod):
testdir.makeconftest(
'''
import pytest
@pytest.fixture
def arg1():
""" hello world """
'''
)
if testmod:
testdir.makepyfile(
"""
def test_hello():
pass
"""
)
result = testdir.runpytest("--fixtures")
result.stdout.fnmatch_lines(
"""
*tmpdir*
*fixtures defined from*conftest*
*arg1*
*hello world*
"""
)
def test_show_fixtures_trimmed_doc(self, testdir):
p = testdir.makepyfile(
textwrap.dedent(
'''\
import pytest
@pytest.fixture
def arg1():
"""
line1
line2
"""
@pytest.fixture
def arg2():
"""
line1
line2
"""
'''
)
)
result = testdir.runpytest("--fixtures", p)
result.stdout.fnmatch_lines(
textwrap.dedent(
"""\
* fixtures defined from test_show_fixtures_trimmed_doc *
arg2
line1
line2
arg1
line1
line2
"""
)
)
def test_show_fixtures_indented_doc(self, testdir):
p = testdir.makepyfile(
textwrap.dedent(
'''\
import pytest
@pytest.fixture
def fixture1():
"""
line1
indented line
"""
'''
)
)
result = testdir.runpytest("--fixtures", p)
result.stdout.fnmatch_lines(
textwrap.dedent(
"""\
* fixtures defined from test_show_fixtures_indented_doc *
fixture1
line1
indented line
"""
)
)
def test_show_fixtures_indented_doc_first_line_unindented(self, testdir):
p = testdir.makepyfile(
textwrap.dedent(
'''\
import pytest
@pytest.fixture
def fixture1():
"""line1
line2
indented line
"""
'''
)
)
result = testdir.runpytest("--fixtures", p)
result.stdout.fnmatch_lines(
textwrap.dedent(
"""\
* fixtures defined from test_show_fixtures_indented_doc_first_line_unindented *
fixture1
line1
line2
indented line
"""
)
)
def test_show_fixtures_indented_in_class(self, testdir):
p = testdir.makepyfile(
textwrap.dedent(
'''\
import pytest
class TestClass(object):
@pytest.fixture
def fixture1(self):
"""line1
line2
indented line
"""
'''
)
)
result = testdir.runpytest("--fixtures", p)
result.stdout.fnmatch_lines(
textwrap.dedent(
"""\
* fixtures defined from test_show_fixtures_indented_in_class *
fixture1
line1
line2
indented line
"""
)
)
def test_show_fixtures_different_files(self, testdir):
"""
#833: --fixtures only shows fixtures from first file
"""
testdir.makepyfile(
test_a='''
import pytest
@pytest.fixture
def fix_a():
"""Fixture A"""
pass
def test_a(fix_a):
pass
'''
)
testdir.makepyfile(
test_b='''
import pytest
@pytest.fixture
def fix_b():
"""Fixture B"""
pass
def test_b(fix_b):
pass
'''
)
result = testdir.runpytest("--fixtures")
result.stdout.fnmatch_lines(
"""
* fixtures defined from test_a *
fix_a
Fixture A
* fixtures defined from test_b *
fix_b
Fixture B
"""
)
def test_show_fixtures_with_same_name(self, testdir):
testdir.makeconftest(
'''
import pytest
@pytest.fixture
def arg1():
"""Hello World in conftest.py"""
return "Hello World"
'''
)
testdir.makepyfile(
"""
def test_foo(arg1):
assert arg1 == "Hello World"
"""
)
testdir.makepyfile(
'''
import pytest
@pytest.fixture
def arg1():
"""Hi from test module"""
return "Hi"
def test_bar(arg1):
assert arg1 == "Hi"
'''
)
result = testdir.runpytest("--fixtures")
result.stdout.fnmatch_lines(
"""
* fixtures defined from conftest *
arg1
Hello World in conftest.py
* fixtures defined from test_show_fixtures_with_same_name *
arg1
Hi from test module
"""
)
def test_fixture_disallow_twice(self):
"""Test that applying @pytest.fixture twice generates an error (#2334)."""
with pytest.raises(ValueError):
@pytest.fixture
@pytest.fixture
def foo():
raise NotImplementedError()
class TestContextManagerFixtureFuncs:
@pytest.fixture(params=["fixture", "yield_fixture"])
def flavor(self, request, testdir, monkeypatch):
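        """Write a ``test_context`` helper module whose ``fixture`` alias
        resolves to ``pytest.fixture`` or ``pytest.yield_fixture`` based on
        the param."""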
monkeypatch.setenv("PYTEST_FIXTURE_FLAVOR", request.param)
testdir.makepyfile(
test_context="""
import os
import pytest
import warnings
VAR = "PYTEST_FIXTURE_FLAVOR"
if VAR not in os.environ:
warnings.warn("PYTEST_FIXTURE_FLAVOR was not set, assuming fixture")
fixture = pytest.fixture
else:
fixture = getattr(pytest, os.environ[VAR])
"""
)
def test_simple(self, testdir, flavor):
testdir.makepyfile(
"""
from test_context import fixture
@fixture
def arg1():
print("setup")
yield 1
print("teardown")
def test_1(arg1):
print("test1", arg1)
def test_2(arg1):
print("test2", arg1)
assert 0
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(
"""
*setup*
*test1 1*
*teardown*
*setup*
*test2 1*
*teardown*
"""
)
def test_scoped(self, testdir, flavor):
testdir.makepyfile(
"""
from test_context import fixture
@fixture(scope="module")
def arg1():
print("setup")
yield 1
print("teardown")
def test_1(arg1):
print("test1", arg1)
def test_2(arg1):
print("test2", arg1)
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(
"""
*setup*
*test1 1*
*test2 1*
*teardown*
"""
)
def test_setup_exception(self, testdir, flavor):
testdir.makepyfile(
"""
            import pytest
            from test_context import fixture
@fixture(scope="module")
def arg1():
pytest.fail("setup")
yield 1
def test_1(arg1):
pass
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(
"""
*pytest.fail*setup*
*1 error*
"""
)
def test_teardown_exception(self, testdir, flavor):
testdir.makepyfile(
"""
            import pytest
            from test_context import fixture
@fixture(scope="module")
def arg1():
yield 1
pytest.fail("teardown")
def test_1(arg1):
pass
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(
"""
*pytest.fail*teardown*
*1 passed*1 error*
"""
)
def test_yields_more_than_one(self, testdir, flavor):
testdir.makepyfile(
"""
from test_context import fixture
@fixture(scope="module")
def arg1():
yield 1
yield 2
def test_1(arg1):
pass
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(
"""
*fixture function*
*test_yields*:2*
"""
)
def test_custom_name(self, testdir, flavor):
testdir.makepyfile(
"""
from test_context import fixture
@fixture(name='meow')
def arg1():
return 'mew'
def test_1(meow):
print(meow)
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(["*mew*"])
class TestParameterizedSubRequest:
def test_call_from_fixture(self, testdir):
testdir.makepyfile(
test_call_from_fixture="""
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
@pytest.fixture
def get_named_fixture(request):
return request.getfixturevalue('fix_with_param')
def test_foo(request, get_named_fixture):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_call_from_fixture.py::test_foo",
"Requested fixture 'fix_with_param' defined in:",
"test_call_from_fixture.py:4",
"Requested here:",
"test_call_from_fixture.py:9",
"*1 error in*",
]
)
def test_call_from_test(self, testdir):
testdir.makepyfile(
test_call_from_test="""
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
def test_foo(request):
request.getfixturevalue('fix_with_param')
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_call_from_test.py::test_foo",
"Requested fixture 'fix_with_param' defined in:",
"test_call_from_test.py:4",
"Requested here:",
"test_call_from_test.py:8",
"*1 failed*",
]
)
def test_external_fixture(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
"""
)
testdir.makepyfile(
test_external_fixture="""
def test_foo(request):
request.getfixturevalue('fix_with_param')
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_external_fixture.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
"conftest.py:4",
"Requested here:",
"test_external_fixture.py:2",
"*1 failed*",
]
)
def test_non_relative_path(self, testdir):
tests_dir = testdir.mkdir("tests")
fixdir = testdir.mkdir("fixtures")
fixfile = fixdir.join("fix.py")
fixfile.write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture(params=[0, 1, 2])
def fix_with_param(request):
return request.param
"""
)
)
testfile = tests_dir.join("test_foos.py")
testfile.write(
textwrap.dedent(
"""\
from fix import fix_with_param
def test_foo(request):
request.getfixturevalue('fix_with_param')
"""
)
)
tests_dir.chdir()
testdir.syspathinsert(fixdir)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
"{}:4".format(fixfile),
"Requested here:",
"test_foos.py:4",
"*1 failed*",
]
)
        # Re-run from a rootdir that does not contain the tests, passing
        # tests_dir explicitly; the reported paths become absolute.
rootdir = testdir.mkdir("rootdir")
rootdir.chdir()
result = testdir.runpytest("--rootdir", rootdir, tests_dir)
result.stdout.fnmatch_lines(
[
"The requested fixture has no parameter defined for test:",
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
"{}:4".format(fixfile),
"Requested here:",
"{}:4".format(testfile),
"*1 failed*",
]
)
def test_pytest_fixture_setup_and_post_finalizer_hook(testdir):
testdir.makeconftest(
"""
def pytest_fixture_setup(fixturedef, request):
print('ROOT setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
def pytest_fixture_post_finalizer(fixturedef, request):
print('ROOT finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
"""
)
testdir.makepyfile(
**{
"tests/conftest.py": """
def pytest_fixture_setup(fixturedef, request):
print('TESTS setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
def pytest_fixture_post_finalizer(fixturedef, request):
print('TESTS finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
""",
"tests/test_hooks.py": """
import pytest
@pytest.fixture()
def my_fixture():
return 'some'
def test_func(my_fixture):
print('TEST test_func')
assert my_fixture == 'some'
""",
}
)
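    # Setup hooks fire innermost-conftest first; finalizer hooks follow the
    # same order, as asserted below.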
result = testdir.runpytest("-s")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*TESTS setup hook called for my_fixture from test_func*",
"*ROOT setup hook called for my_fixture from test_func*",
"*TEST test_func*",
"*TESTS finalizer hook called for my_fixture from test_func*",
"*ROOT finalizer hook called for my_fixture from test_func*",
]
)
class TestScopeOrdering:
"""Class of tests that ensure fixtures are ordered based on their scopes (#2405)"""
@pytest.mark.parametrize("variant", ["mark", "autouse"])
def test_func_closure_module_auto(self, testdir, variant, monkeypatch):
"""Semantically identical to the example posted in #2405 when ``use_mark=True``"""
monkeypatch.setenv("FIXTURE_ACTIVATION_VARIANT", variant)
testdir.makepyfile(
"""
import warnings
import os
import pytest
VAR = 'FIXTURE_ACTIVATION_VARIANT'
VALID_VARS = ('autouse', 'mark')
VARIANT = os.environ.get(VAR)
if VARIANT is None or VARIANT not in VALID_VARS:
warnings.warn("{!r} is not in {}, assuming autouse".format(VARIANT, VALID_VARS) )
variant = 'mark'
@pytest.fixture(scope='module', autouse=VARIANT == 'autouse')
def m1(): pass
if VARIANT=='mark':
pytestmark = pytest.mark.usefixtures('m1')
@pytest.fixture(scope='function', autouse=True)
def f1(): pass
def test_func(m1):
pass
"""
)
items, _ = testdir.inline_genitems()
request = FixtureRequest(items[0])
assert request.fixturenames == "m1 f1".split()
def test_func_closure_with_native_fixtures(self, testdir, monkeypatch):
"""Sanity check that verifies the order returned by the closures and the actual fixture execution order:
The execution order may differ because of fixture inter-dependencies.
"""
monkeypatch.setattr(pytest, "FIXTURE_ORDER", [], raising=False)
testdir.makepyfile(
"""
import pytest
FIXTURE_ORDER = pytest.FIXTURE_ORDER
@pytest.fixture(scope="session")
def s1():
FIXTURE_ORDER.append('s1')
@pytest.fixture(scope="package")
def p1():
FIXTURE_ORDER.append('p1')
@pytest.fixture(scope="module")
def m1():
FIXTURE_ORDER.append('m1')
@pytest.fixture(scope='session')
def my_tmpdir_factory():
FIXTURE_ORDER.append('my_tmpdir_factory')
@pytest.fixture
def my_tmpdir(my_tmpdir_factory):
FIXTURE_ORDER.append('my_tmpdir')
@pytest.fixture
def f1(my_tmpdir):
FIXTURE_ORDER.append('f1')
@pytest.fixture
def f2():
FIXTURE_ORDER.append('f2')
def test_foo(f1, p1, m1, f2, s1): pass
"""
)
items, _ = testdir.inline_genitems()
request = FixtureRequest(items[0])
# order of fixtures based on their scope and position in the parameter list
assert (
request.fixturenames == "s1 my_tmpdir_factory p1 m1 f1 f2 my_tmpdir".split()
)
testdir.runpytest()
# actual fixture execution differs: dependent fixtures must be created first ("my_tmpdir")
assert (
pytest.FIXTURE_ORDER == "s1 my_tmpdir_factory p1 m1 my_tmpdir f1 f2".split()
)
def test_func_closure_module(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope='module')
def m1(): pass
@pytest.fixture(scope='function')
def f1(): pass
def test_func(f1, m1):
pass
"""
)
items, _ = testdir.inline_genitems()
request = FixtureRequest(items[0])
assert request.fixturenames == "m1 f1".split()
def test_func_closure_scopes_reordered(self, testdir):
"""Test ensures that fixtures are ordered by scope regardless of the order of the parameters, although
fixtures of same scope keep the declared order
"""
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope='session')
def s1(): pass
@pytest.fixture(scope='module')
def m1(): pass
@pytest.fixture(scope='function')
def f1(): pass
@pytest.fixture(scope='function')
def f2(): pass
class Test:
@pytest.fixture(scope='class')
def c1(cls): pass
def test_func(self, f2, f1, c1, m1, s1):
pass
"""
)
items, _ = testdir.inline_genitems()
request = FixtureRequest(items[0])
assert request.fixturenames == "s1 m1 c1 f2 f1".split()
def test_func_closure_same_scope_closer_root_first(self, testdir):
"""Auto-use fixtures of same scope are ordered by closer-to-root first"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(scope='module', autouse=True)
def m_conf(): pass
"""
)
testdir.makepyfile(
**{
"sub/conftest.py": """
import pytest
@pytest.fixture(scope='package', autouse=True)
def p_sub(): pass
@pytest.fixture(scope='module', autouse=True)
def m_sub(): pass
""",
"sub/__init__.py": "",
"sub/test_func.py": """
import pytest
@pytest.fixture(scope='module', autouse=True)
def m_test(): pass
@pytest.fixture(scope='function')
def f1(): pass
def test_func(m_test, f1):
pass
""",
}
)
items, _ = testdir.inline_genitems()
request = FixtureRequest(items[0])
assert request.fixturenames == "p_sub m_conf m_sub m_test f1".split()
def test_func_closure_all_scopes_complex(self, testdir):
"""Complex test involving all scopes and mixing autouse with normal fixtures"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(scope='session')
def s1(): pass
@pytest.fixture(scope='package', autouse=True)
def p1(): pass
"""
)
testdir.makepyfile(**{"__init__.py": ""})
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope='module', autouse=True)
def m1(): pass
@pytest.fixture(scope='module')
def m2(s1): pass
@pytest.fixture(scope='function')
def f1(): pass
@pytest.fixture(scope='function')
def f2(): pass
class Test:
@pytest.fixture(scope='class', autouse=True)
def c1(self):
pass
def test_func(self, f2, f1, m2):
pass
"""
)
items, _ = testdir.inline_genitems()
request = FixtureRequest(items[0])
assert request.fixturenames == "s1 p1 m1 m2 c1 f2 f1".split()
def test_multiple_packages(self, testdir):
"""Complex test involving multiple package fixtures. Make sure teardowns
are executed in order.
.
└── root
├── __init__.py
├── sub1
│ ├── __init__.py
│ ├── conftest.py
│ └── test_1.py
└── sub2
├── __init__.py
├── conftest.py
└── test_2.py
"""
root = testdir.mkdir("root")
root.join("__init__.py").write("values = []")
sub1 = root.mkdir("sub1")
sub1.ensure("__init__.py")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
from .. import values
@pytest.fixture(scope="package")
def fix():
values.append("pre-sub1")
yield values
assert values.pop() == "pre-sub1"
"""
)
)
sub1.join("test_1.py").write(
textwrap.dedent(
"""\
from .. import values
def test_1(fix):
assert values == ["pre-sub1"]
"""
)
)
sub2 = root.mkdir("sub2")
sub2.ensure("__init__.py")
sub2.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
from .. import values
@pytest.fixture(scope="package")
def fix():
values.append("pre-sub2")
yield values
assert values.pop() == "pre-sub2"
"""
)
)
sub2.join("test_2.py").write(
textwrap.dedent(
"""\
from .. import values
def test_2(fix):
assert values == ["pre-sub2"]
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_class_fixture_self_instance(self, testdir):
"""Check that plugin classes which implement fixtures receive the plugin instance
as self (see #2270).
"""
testdir.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(MyPlugin())
class MyPlugin():
def __init__(self):
self.arg = 1
@pytest.fixture(scope='function')
def myfix(self):
assert isinstance(self, MyPlugin)
return self.arg
"""
)
testdir.makepyfile(
"""
class TestClass(object):
def test_1(self, myfix):
assert myfix == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_call_fixture_function_error():
"""Check if an error is raised if a fixture function is called directly (#4545)"""
@pytest.fixture
def fix():
raise NotImplementedError()
with pytest.raises(pytest.fail.Exception):
assert fix() == 1
def test_fixture_param_shadowing(testdir):
"""Parametrized arguments would be shadowed if a fixture with the same name also exists (#5036)"""
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=['a', 'b'])
def argroot(request):
return request.param
@pytest.fixture
def arg(argroot):
return argroot
# This should only be parametrized directly
@pytest.mark.parametrize("arg", [1])
def test_direct(arg):
assert arg == 1
# This should be parametrized based on the fixtures
def test_normal_fixture(arg):
assert isinstance(arg, str)
# Indirect should still work:
@pytest.fixture
def arg2(request):
return 2*request.param
@pytest.mark.parametrize("arg2", [1], indirect=True)
def test_indirect(arg2):
assert arg2 == 2
"""
)
    # All four tests should run, each parametrized as asserted below
result = testdir.runpytest("-v")
result.assert_outcomes(passed=4)
result.stdout.fnmatch_lines(["*::test_direct[[]1[]]*"])
result.stdout.fnmatch_lines(["*::test_normal_fixture[[]a[]]*"])
result.stdout.fnmatch_lines(["*::test_normal_fixture[[]b[]]*"])
result.stdout.fnmatch_lines(["*::test_indirect[[]1[]]*"])
def test_fixture_named_request(testdir):
testdir.copy_example("fixtures/test_fixture_named_request.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*'request' is a reserved word for fixtures, use another name:",
" *test_fixture_named_request.py:5",
]
)
def test_fixture_duplicated_arguments():
"""Raise error if there are positional and keyword arguments for the same parameter (#1682)."""
with pytest.raises(TypeError) as excinfo:
@pytest.fixture("session", scope="session")
def arg(arg):
pass
assert (
str(excinfo.value)
== "The fixture arguments are defined as positional and keyword: scope. "
"Use only keyword arguments."
)
def test_fixture_with_positionals():
"""Raise warning, but the positionals should still works (#1682)."""
from _pytest.deprecated import FIXTURE_POSITIONAL_ARGUMENTS
with pytest.warns(pytest.PytestDeprecationWarning) as warnings:
@pytest.fixture("function", [0], True)
def fixture_with_positionals():
pass
assert str(warnings[0].message) == str(FIXTURE_POSITIONAL_ARGUMENTS)
assert fixture_with_positionals._pytestfixturefunction.scope == "function"
assert fixture_with_positionals._pytestfixturefunction.params == (0,)
assert fixture_with_positionals._pytestfixturefunction.autouse
def test_indirect_fixture_does_not_break_scope(testdir):
"""Ensure that fixture scope is respected when using indirect fixtures (#570)"""
testdir.makepyfile(
"""
import pytest
instantiated = []
@pytest.fixture(scope="session")
def fixture_1(request):
instantiated.append(("fixture_1", request.param))
@pytest.fixture(scope="session")
def fixture_2(request):
instantiated.append(("fixture_2", request.param))
scenarios = [
("A", "a1"),
("A", "a2"),
("B", "b1"),
("B", "b2"),
("C", "c1"),
("C", "c2"),
]
@pytest.mark.parametrize(
"fixture_1,fixture_2", scenarios, indirect=["fixture_1", "fixture_2"]
)
def test_create_fixtures(fixture_1, fixture_2):
pass
def test_check_fixture_instantiations():
assert instantiated == [
('fixture_1', 'A'),
('fixture_2', 'a1'),
('fixture_2', 'a2'),
('fixture_1', 'B'),
('fixture_2', 'b1'),
('fixture_2', 'b2'),
('fixture_1', 'C'),
('fixture_2', 'c1'),
('fixture_2', 'c2'),
]
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=7)
def test_fixture_parametrization_nparray(testdir):
pytest.importorskip("numpy")
testdir.makepyfile(
"""
from numpy import linspace
from pytest import fixture
@fixture(params=linspace(1, 10, 10))
def value(request):
return request.param
def test_bug(value):
assert value == value
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=10)
def test_fixture_arg_ordering(testdir):
"""
This test describes how fixtures in the same scope but without explicit dependencies
between them are created. While users should make dependencies explicit, often
they rely on this order, so this test exists to catch regressions in this regard.
See #6540 and #6492.
"""
p1 = testdir.makepyfile(
"""
import pytest
suffixes = []
@pytest.fixture
def fix_1(): suffixes.append("fix_1")
@pytest.fixture
def fix_2(): suffixes.append("fix_2")
@pytest.fixture
def fix_3(): suffixes.append("fix_3")
@pytest.fixture
def fix_4(): suffixes.append("fix_4")
@pytest.fixture
def fix_5(): suffixes.append("fix_5")
@pytest.fixture
def fix_combined(fix_1, fix_2, fix_3, fix_4, fix_5): pass
def test_suffix(fix_combined):
assert suffixes == ["fix_1", "fix_2", "fix_3", "fix_4", "fix_5"]
"""
)
result = testdir.runpytest("-vv", str(p1))
assert result.ret == 0
def test_yield_fixture_with_no_value(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(name='custom')
def empty_yield():
if False:
yield
def test_fixt(custom):
pass
"""
)
expected = "E ValueError: custom did not yield a value"
result = testdir.runpytest()
result.assert_outcomes(error=1)
result.stdout.fnmatch_lines([expected])
assert result.ret == ExitCode.TESTS_FAILED
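# --- Illustrative sketch (not part of the original suite; pytest is already
# imported at the top of this module): a minimal, self-contained version of
# the scope ordering asserted above. Higher-scoped fixtures (session >
# package > module > class > function) are instantiated first.
example_order = []
@pytest.fixture(scope="session")
def example_s1():
    example_order.append("s1")
@pytest.fixture(scope="module")
def example_m1(example_s1):
    example_order.append("m1")
@pytest.fixture
def example_f1(example_m1):
    example_order.append("f1")
def test_example_scope_order(example_f1):
    assert example_order == ["s1", "m1", "f1"]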
|
markshao/pytest
|
testing/python/fixtures.py
|
Python
|
mit
| 130,347
|
[
"VisIt"
] |
f11ad568202bbf9705b8fa897d734eed6de298119a6a61bbe171a76f16d77766
|
"""
Handling the download of the shifter Proxy
"""
__RCSID__ = "$Id$"
import os
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
def getShifterProxy(shifterType, fileName=False):
"""
This method returns a shifter's proxy
  :param shifterType: ProductionManager / DataManager...
  :param fileName: optional file where to dump the proxy
  """
if fileName:
mkDir(os.path.dirname(fileName))
opsHelper = Operations()
userName = opsHelper.getValue(cfgPath('Shifter', shifterType, 'User'), '')
if not userName:
return S_ERROR("No shifter User defined for %s" % shifterType)
result = Registry.getDNForUsername(userName)
if not result['OK']:
return result
userDN = result['Value'][0]
result = Registry.findDefaultGroupForDN(userDN)
if not result['OK']:
return result
defaultGroup = result['Value']
userGroup = opsHelper.getValue(cfgPath('Shifter', shifterType, 'Group'), defaultGroup)
vomsAttr = Registry.getVOMSAttributeForGroup(userGroup)
if vomsAttr:
gLogger.info("Getting VOMS [%s] proxy for shifter %s@%s (%s)" % (vomsAttr, userName,
userGroup, userDN))
result = gProxyManager.downloadVOMSProxyToFile(userDN, userGroup,
filePath=fileName,
requiredTimeLeft=86400,
cacheTime=86400)
else:
gLogger.info("Getting proxy for shifter %s@%s (%s)" % (userName, userGroup, userDN))
result = gProxyManager.downloadProxyToFile(userDN, userGroup,
filePath=fileName,
requiredTimeLeft=86400,
cacheTime=86400)
if not result['OK']:
return result
chain = result['chain']
fileName = result['Value']
return S_OK({'DN': userDN,
'username': userName,
'group': userGroup,
'chain': chain,
'proxyFile': fileName})
def setupShifterProxyInEnv(shifterType, fileName=False):
"""
  Download the shifter's proxy and set it up as the default
  proxy by exporting X509_USER_PROXY in the environment.
  :param shifterType: ProductionManager / DataManager...
  :param fileName: optional file where to dump the proxy
  """
result = getShifterProxy(shifterType, fileName)
if not result['OK']:
return result
proxyDict = result['Value']
os.environ['X509_USER_PROXY'] = proxyDict['proxyFile']
return result
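# --- Illustrative usage sketch (assumption: a configured DIRAC installation
# with a 'DataManager' shifter defined under the Operations/Shifter section):
#
#   result = setupShifterProxyInEnv('DataManager', fileName='/tmp/shifterProxy')
#   if not result['OK']:
#     gLogger.error("Could not set up shifter proxy: %s" % result['Message'])
#   else:
#     proxyInfo = result['Value']
#     gLogger.info("Proxy for %s@%s dumped to %s" % (proxyInfo['username'],
#                                                    proxyInfo['group'],
#                                                    proxyInfo['proxyFile']))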
|
andresailer/DIRAC
|
Core/Utilities/Shifter.py
|
Python
|
gpl-3.0
| 2,800
|
[
"DIRAC"
] |
18de2e90628f6669d80a82142f0dce74cf650c7fa8b112b881638732d9553eb0
|
"""KNRM model."""
import keras
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine import hyper_spaces
class KNRM(BaseModel):
"""
KNRM model.
Examples:
>>> model = KNRM()
>>> model.params['embedding_input_dim'] = 10000
>>> model.params['embedding_output_dim'] = 10
>>> model.params['embedding_trainable'] = True
>>> model.params['kernel_num'] = 11
>>> model.params['sigma'] = 0.1
>>> model.params['exact_sigma'] = 0.001
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls):
"""Get default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(
name='kernel_num',
value=11,
hyper_space=hyper_spaces.quniform(low=5, high=20),
desc="The number of RBF kernels."
))
params.add(Param(
name='sigma',
value=0.1,
hyper_space=hyper_spaces.quniform(
low=0.01, high=0.2, q=0.01),
desc="The `sigma` defines the kernel width."
))
params.add(Param(
name='exact_sigma', value=0.001,
desc="The `exact_sigma` denotes the `sigma` "
"for exact match."
))
return params
def build(self):
"""Build model."""
query, doc = self._make_inputs()
embedding = self._make_embedding_layer()
q_embed = embedding(query)
d_embed = embedding(doc)
mm = keras.layers.Dot(axes=[2, 2], normalize=True)([q_embed, d_embed])
KM = []
for i in range(self._params['kernel_num']):
mu = 1. / (self._params['kernel_num'] - 1) + (2. * i) / (
self._params['kernel_num'] - 1) - 1.0
sigma = self._params['sigma']
if mu > 1.0:
sigma = self._params['exact_sigma']
mu = 1.0
mm_exp = self._kernel_layer(mu, sigma)(mm)
mm_doc_sum = keras.layers.Lambda(
lambda x: tf.reduce_sum(x, 2))(mm_exp)
mm_log = keras.layers.Activation(tf.math.log1p)(mm_doc_sum)
mm_sum = keras.layers.Lambda(
lambda x: tf.reduce_sum(x, 1))(mm_log)
KM.append(mm_sum)
phi = keras.layers.Lambda(lambda x: tf.stack(x, 1))(KM)
out = self._make_output_layer()(phi)
self._backend = keras.Model(inputs=[query, doc], outputs=[out])
@classmethod
def _kernel_layer(cls, mu: float, sigma: float) -> keras.layers.Layer:
"""
Gaussian kernel layer in KNRM.
:param mu: Float, mean of the kernel.
:param sigma: Float, sigma of the kernel.
:return: `keras.layers.Layer`.
"""
def kernel(x):
return tf.math.exp(-0.5 * (x - mu) * (x - mu) / sigma / sigma)
return keras.layers.Activation(kernel)
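# --- Illustrative sketch (not part of MatchZoo): how build() spaces the
# kernel means. For the default kernel_num = 11 the loop yields mu values
# evenly spread over [-1, 1]; the last one (1.1) is clamped to 1.0 in
# build() and paired with the narrower `exact_sigma` as an exact-match
# kernel.
if __name__ == '__main__':
    kernel_num = 11
    mus = [1. / (kernel_num - 1) + (2. * i) / (kernel_num - 1) - 1.0
           for i in range(kernel_num)]
    print([round(mu, 1) for mu in mus])
    # -> [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9, 1.1]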
|
faneshion/MatchZoo
|
matchzoo/models/knrm.py
|
Python
|
apache-2.0
| 3,057
|
[
"Gaussian"
] |
da69b6d277da79ff48ba92374c3d1148ef00b090f75790b0993ab50d203f4d21
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from Plugins.Extensions.OpenWebif.local import tstrings
from json import dumps
from Plugins.Extensions.OpenWebif.controllers.views.ajax.renderevtblock import renderEvtBlock
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.519162
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/ajax/multiepg.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class multiepg(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(multiepg, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def channelsInBouquet(self, **KWS):
## CHEETAH: generated from #block channelsInBouquet at line 50, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<thead>
<tr>
''')
for sname, eventlist in VFN(VFFSL(SL,"events",True),"iteritems",False)(): # generated from line 53, col 2
write(u'''\t<td class="border"><div class="service"><img src="''')
_v = VFFSL(SL,"picons",True)[VFFSL(SL,"sname",True)] # u'$(picons[$sname])' on line 54, col 52
if _v is not None: write(_filter(_v, rawExpr=u'$(picons[$sname])')) # from line 54, col 52.
write(u'''" /> ''')
_v = VFFSL(SL,"sname",True) # u'$sname' on line 54, col 74
if _v is not None: write(_filter(_v, rawExpr=u'$sname')) # from line 54, col 74.
write(u'''</div></td>
''')
write(u'''</tr>
</thead>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<style>
\ttable { font-family: Verdana; font-size: 11px; }
\ttr { vertical-align: top }
\t.service { font-weight: bold; font-size: 12px; color:#fff; background-color: #1c47ae; line-height:30px; padding: 3px; white-space: nowrap; overflow: hidden; width: 184px}
\t.service img { width:50px; height:30px; float:left; margin-right:10px; }
\t.title { font-weight: bold; color: #061c37; }
\t.desc { font-size: 10px; color: #176093; }
\t.even { background-color: #dfeffc; }
\t.border { border-right: 1px solid #4297d7; }
\t.event { cursor: pointer; width: 190px; overflow:hidden; }
\t.bq { background-color: #1c478e; font-size: 11px; font-weight: bold; color: #fff; padding: 2px 4px; line-height: 18px; cursor: pointer; white-space: nowrap; display: inline-block; margin: 1px 1px 0px 0px;}
\t.bq.selected { color: #A9D1FA; }
\t.plus { background-color: #dfeffc; font-size: 13px; font-weight: bold; color: #1c478e; padding: 2px 4px; line-height: 21px; cursor: pointer; white-space: nowrap; }
\t.plus.selected { color: #ea7409; }
\t.timer { color: #f00; font-weight: bold; font-size: 10px; }
\t.timer.disabled { color: #f80; }
\t#eventdescription { width: 375px; height: auto; position: fixed; top: 205px; left: 350px; z-index: 1000; display: none; overflow: auto; }
.fht-table,.fht-table thead,.fht-table tfoot,.fht-table tbody,.fht-table tr,.fht-table th,.fht-table td{font-size:100%;font:inherit;vertical-align:top;margin:0;padding:0}
.fht-table{border-collapse:collapse;border-spacing:0}
.fht-table-wrapper,.fht-table-wrapper .fht-thead,.fht-table-wrapper .fht-tfoot,.fht-table-wrapper .fht-fixed-column .fht-tbody,.fht-table-wrapper .fht-fixed-body .fht-tbody,.fht-table-wrapper .fht-tbody{overflow:hidden;position:relative}
.fht-table-wrapper .fht-fixed-body .fht-tbody,.fht-table-wrapper .fht-tbody{overflow:auto}
.fht-table-wrapper .fht-table .fht-cell{overflow:hidden;height:1px}
.fht-table-wrapper .fht-fixed-column,.fht-table-wrapper .fht-fixed-body{top:0;left:0;position:absolute}
.fht-table-wrapper .fht-fixed-column{z-index:1}
}
</style>
<table style="margin:0">
<tr>
''')
for slot in range(0,7): # generated from line 34, col 1
write(u''' <td class="plus ''')
if VFFSL(SL,"slot",True)==VFFSL(SL,"day",True) : # generated from line 35, col 21
_v = 'selected'
if _v is not None: write(_filter(_v))
else:
_v = ''
if _v is not None: write(_filter(_v))
write(u'''" js:day="''')
_v = VFFSL(SL,"slot",True) # u'$(slot)' on line 35, col 72
if _v is not None: write(_filter(_v, rawExpr=u'$(slot)')) # from line 35, col 72.
write(u'''">''')
_v = VFFSL(SL,"tstrings",True)[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))] # u'$tstrings[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))]' on line 35, col 81
if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))]')) # from line 35, col 81.
write(u'''</td>
''')
write(u'''</tr>
</table>
<table>
<tr>
''')
for bq in VFFSL(SL,"bouquets",True): # generated from line 42, col 1
write(u'''<td class="bq ''')
if VFFSL(SL,"bq",True)[0]==VFFSL(SL,"bref",True) : # generated from line 43, col 15
_v = 'selected'
if _v is not None: write(_filter(_v))
else:
_v = ''
if _v is not None: write(_filter(_v))
write(u'''" js:ref="''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"bq",True)[0]) # u'$quote($bq[0])' on line 43, col 68
if _v is not None: write(_filter(_v, rawExpr=u'$quote($bq[0])')) # from line 43, col 68.
write(u'''">''')
_v = VFFSL(SL,"bq",True)[1] # u'$bq[1]' on line 43, col 84
if _v is not None: write(_filter(_v, rawExpr=u'$bq[1]')) # from line 43, col 84.
write(u'''</td>
''')
write(u'''</tr>
</table>
''')
renderEventBlock = VFFSL(SL,"renderEvtBlock",False)()
write(u'''<table cellpadding="0" cellspacing="0" id="TBL1">
''')
self.channelsInBouquet(trans=trans)
write(u'''<tbody>
''')
hasEvents = False
for slot in range(0,12): # generated from line 61, col 2
write(u'''<tr class="''')
_v = VFFSL(SL,"slot",True)%2 and 'odd' or 'even' # u"$(slot%2 and 'odd' or 'even')" on line 62, col 12
if _v is not None: write(_filter(_v, rawExpr=u"$(slot%2 and 'odd' or 'even')")) # from line 62, col 12.
write(u'''">
''')
for sname, eventlist in VFN(VFFSL(SL,"events",True),"iteritems",False)(): # generated from line 63, col 2
write(u'''<td class="border">
''')
for event in VFFSL(SL,"eventlist",True)[VFFSL(SL,"slot",True)]: # generated from line 65, col 2
write(u'''\t\t''')
_v = VFN(VFFSL(SL,"renderEventBlock",True),"render",False)(VFFSL(SL,"event",True)) # u'$renderEventBlock.render($event)' on line 66, col 3
if _v is not None: write(_filter(_v, rawExpr=u'$renderEventBlock.render($event)')) # from line 66, col 3.
write(u'''
''')
hasEvents = True
write(u'''</td>
''')
write(u'''</tr>
''')
write(u'''</tbody>
</table>
<div id="eventdescription"></div>
<script>
var picons = ''')
_v = VFFSL(SL,"dumps",False)(VFFSL(SL,"picons",True)) # u'$dumps($picons)' on line 78, col 14
if _v is not None: write(_filter(_v, rawExpr=u'$dumps($picons)')) # from line 78, col 14.
write(u''';
var reloadTimers = false;
$(".bq").click(function() {
var id = $(this).attr("js:ref");
$("#tvcontent").html(loadspinner).load(\'ajax/multiepg?bref=\'+id);
});
$(".event").click(function() {
var id = $(this).attr("js:id");
var ref = $(this).attr("js:ref");
$("#eventdescription").load(\'ajax/event?idev=\'+id+\'&sref=\'+escape(ref), function() {
\t\t$("#eventdescription").show(200).draggable( { handle: ".handle" } );
});
});
$(".plus").click(function() {
\tvar day = $(this).attr("js:day");
\t$("#tvcontent").html(loadspinner).load(\'ajax/multiepg?bref=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"bref",True)) # u'${quote($bref)}' on line 93, col 62
if _v is not None: write(_filter(_v, rawExpr=u'${quote($bref)}')) # from line 93, col 62.
write(u'''&day=\'+day);
});
if(!timeredit_initialized)
\t$(\'#editTimerForm\').load(\'/ajax/edittimer\');
</script>
<script type="text/javascript" src="js/jquery.fixedheadertable.min.js"></script>
<script>
$(function() {
$(\'#TBL1\').fixedHeaderTable({
\tfooter: true,
\tcloneHeadToFoot: true,
\taltClass: \'odd\',
\tautoShow: true
});
});
</script>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_multiepg= 'respond'
## END CLASS DEFINITION
if not hasattr(multiepg, '_initCheetahAttributes'):
templateAPIClass = getattr(multiepg, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(multiepg)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=multiepg()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/ajax/multiepg.py
|
Python
|
gpl-2.0
| 12,887
|
[
"VisIt"
] |
cbb05c6ccc3f655b10621e1c9bafe1c6ed0b47596ab1076c735eddf3f6ddee8d
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
# Standard module
import math
import operator
# Third-party modules
import numpy
# =============================================================================
# Classes
# =============================================================================
class AtomError(Exception):
"""
    Exception class for the Atom class.
"""
pass
class ChainError(Exception):
"""
    Exception class for the Chain class.
"""
pass
class Atom:
"""
Class for atoms in PDB or PDBx/mmCIF format.
"""
def __init__(self, ident=0, name=None, resname=None, chain=None, resid=0,
x=0.0, y=0.0, z=0.0, model=None):
"""default constructor"""
self.id = ident
self.name = name
self.resname = resname
self.chain = chain
self.resid = resid
self.x = x
self.y = y
self.z = z
self.model = model
@classmethod
def read_from_PDB(cls, line):
"""
Constructor from a PDB file line.
Parameters
----------
line : str
Line from a PDB file starting with 'ATOM' or 'HETATM'.
Raises
------
AtomError
If line is too short.
Notes
-----
PDB format documentation:
http://www.wwpdb.org/documentation/format33/v3.3.html
"""
if len(line) < 55:
raise AtomError("ATOM line too short:\n{0}".format(line))
ident = int(line[6:11].strip())
name = line[12:16].strip()
resname = line[17:20].strip()
chain = line[21:22].strip()
resid = int(line[22:26].strip())
x = float(line[30:38].strip())
y = float(line[38:46].strip())
z = float(line[46:54].strip())
return cls(ident, name, resname, chain, resid, x, y, z)
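    # --- Illustrative sketch (not part of PBxplore): parsing one fixed-width
    # ATOM record with the column slices used above (PDB format v3.3):
    #
    #   line = "ATOM    840  C   ARG B  11      22.955  23.561  -4.012  1.00 28.07           C "
    #   at = Atom.read_from_PDB(line)
    #   (at.id, at.name, at.resname, at.chain, at.resid)
    #   -> (840, 'C', 'ARG', 'B', 11)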
@classmethod
def read_from_PDBx(cls, line, fields):
"""
Constructor from a PDBx/mmCIF file line
Parameters
----------
line : str
Line from a PDBx/mmCIF file starting with 'ATOM' or 'HETATM'.
fields : list
List of str containing fields of data for PDBx/mmCIF format.
Notes
-----
Format documentation:
http://mmcif.wwpdb.org/docs/tutorials/content/atomic-description.html
"""
try:
dic = dict(zip(fields, line.split()))
except:
raise AtomError("Something went wrong in reading\n{0}".format(line))
try:
ident = int(dic['id'])
name = dic['label_atom_id']
resname = dic['label_comp_id']
chain = dic['label_asym_id']
resid = int(dic['label_seq_id'])
x = float(dic['Cartn_x'])
y = float(dic['Cartn_y'])
z = float(dic['Cartn_z'])
model = dic['pdbx_PDB_model_num']
except:
raise AtomError("Something went wrong in data convertion\n{0}"
.format(dic))
return cls(ident, name, resname, chain, resid, x, y, z, model)
@classmethod
def read_from_xtc(cls, atm):
"""
Constructor from a .xtc mdanalysis selection.
Parameters
----------
        atm : atom object of MDAnalysis
"""
x, y, z = atm.position
return cls(atm.id, atm.name, atm.resname, "", atm.resid, x, y, z)
def __repr__(self):
"""
Atom representation.
"""
return 'atom {:4d} {:4s} in {:4d} {:3s}' \
.format(self.id, self.name, self.resid, self.resname)
def format(self):
"""
Atom displayed in PDB format.
"""
return '%-6s%5d %4s%1s%3s %1s%4d%1s %8.3f%8.3f%8.3f%6.2f%6.2f %2s%2s' \
% ('ATOM ', self.id, self.name, ' ', self.resname, self.chain,
self.resid, '', self.x, self.y, self.z,
0.0, 0.0, ' ', ' ')
@property
def coords(self):
"""
Return atom coordinates.
"""
return [self.x, self.y, self.z]
@coords.setter
def coords(self, pos):
"""
Set the cartesian coordinates of the atom.
Parameters
----------
pos: a list or numpy array of 3 elements
"""
self.x, self.y, self.z = pos
class Chain:
"""
Class to handle PDB chain
"""
def __init__(self):
"""
Constructor
"""
self.name = ""
self.model = ""
self.atoms = []
def __repr__(self):
"""
Representation
"""
return "Chain {0} / model {1}: {2} atoms".format(self.name,
self.model,
len(self.atoms))
def __getitem__(self, i):
return self.atoms[i]
def add_atom(self, atom):
"""
Add atom.
Parameters
----------
atom : object from Atom class
Atom to be added to chain.
Raises
------
ChainError
If the chain has several names.
"""
# set chain name when first atom is stored
if not self.atoms:
self.name = atom.chain
# check that chain name is always the same
elif self.name != atom.chain:
raise ChainError("Several chains are in the same structure")
# add atom to structure
self.atoms.append(atom)
def set_model(self, model):
"""
Set model number.
Parameters
----------
model : str
Model identifier.
"""
self.model = model
def size(self):
"""
Get number of atoms.
"""
return len(self.atoms)
def set_coordinates(self, positions):
"""
Update the coordinates of all atoms in a chain.
Parameters
----------
positions : a 2D numpy array with a shape of (number of atoms * 3)
Raises
------
TypeError
If positions doesn't have the right shape
"""
if numpy.shape(positions) != (self.size(), 3):
raise ValueError("Coordinates array doesn't have the good shape.")
for atm, coords in zip(self.atoms, positions):
atm.coords = coords
def get_phi_psi_angles(self):
"""
Compute phi and psi angles.
Returns
-------
phi_psi_angles : dict
Dict with residue number (int) as keys
            and a ``{'phi' : (float), 'psi' : (float)}`` dictionary as values.
Examples
--------
>>> lines = ("ATOM 840 C ARG B 11 22.955 23.561 -4.012 1.00 28.07 C ",
... "ATOM 849 N SER B 12 22.623 24.218 -2.883 1.00 24.77 N ",
... "ATOM 850 CA SER B 12 22.385 23.396 -1.637 1.00 21.99 C ",
... "ATOM 851 C SER B 12 21.150 24.066 -0.947 1.00 32.67 C ",
... "ATOM 855 N ILE B 13 20.421 23.341 -0.088 1.00 30.25 N ")
>>>
>>> import pbxplore as pbx
>>> ch = pbx.structure.structure.Chain()
>>> for line in lines:
... at = pbx.structure.structure.Atom()
... at.read_from_PDB(line)
... ch.add_atom(at)
...
>>> print(ch.get_phi_psi_angles())
{11: {'phi': None, 'psi': None}, 12: {'phi': -139.77684605036447, 'psi': 157.94348570201197}, 13: {'phi': None, 'psi': None}}
"""
# extract backbone atoms
backbone = {}
for atom in self.atoms:
if atom.name in ["CA", "C", "O", "N"]:
resid = atom.resid
if resid in backbone:
backbone[resid][atom.name] = atom
else:
backbone[resid] = {atom.name: atom}
# get dihedrals
phi_psi_angles = {}
for res in sorted(backbone):
# phi: angle between C(i-1) - N(i) - CA(i) - C(i)
try:
phi = get_dihedral(backbone[res-1]["C" ].coords,
backbone[res ]["N" ].coords,
backbone[res ]["CA"].coords,
backbone[res ]["C" ].coords)
except:
phi = None
# psi: angle between N(i) - CA(i) - C(i) - N(i+1)
try:
psi = get_dihedral(backbone[res ]["N" ].coords,
backbone[res ]["CA"].coords,
backbone[res ]["C" ].coords,
backbone[res+1]["N" ].coords)
except:
psi = None
# print(res, phi, psi)
phi_psi_angles[res] = {"phi": phi, "psi": psi}
return phi_psi_angles
# =============================================================================
# Functions
# =============================================================================
def get_dihedral(atomA, atomB, atomC, atomD):
"""
Compute dihedral angle between 4 atoms (A, B, C, D).
Parameters
----------
atomA : list
Coordinates of atom A as a list or tuple of floats [x, y, z].
atomB : list
Coordinates of atom B as a list or tuple of floats [x, y, z].
atomC : list
Coordinates of atom C as a list or tuple of floats [x, y, z].
atomD : list
Coordinates of atom D as a list or tuple of floats [x, y, z].
Returns
-------
torsion : float
Torsion angle defined by the atoms A, B, C and D. Angle is defined
in degrees in the range -180, +180.
Notes
-----
This function is on purpose not part of any class to ease its reusability.
Examples
--------
>>> atom1 = (-1.918, -6.429, -7.107)
>>> atom2 = (-2.609, -5.125, -7.305)
>>> atom3 = (-4.108, -5.392, -7.331)
>>> atom4 = (-4.469, -6.494, -7.911)
>>> get_dihedral(atom1, atom2, atom3, atom4)
-36.8942888266
"""
# vectors
AB = list(map(operator.sub, atomB, atomA))
BC = list(map(operator.sub, atomC, atomB))
CD = list(map(operator.sub, atomD, atomC))
# normal vectors
n1 = []
n1.append(((AB[1] * BC[2]) - (AB[2] * BC[1])))
n1.append(((AB[2] * BC[0]) - (AB[0] * BC[2])))
n1.append(((AB[0] * BC[1]) - (AB[1] * BC[0])))
n2 = []
n2.append(((BC[1] * CD[2]) - (BC[2] * CD[1])))
n2.append(((BC[2] * CD[0]) - (BC[0] * CD[2])))
n2.append(((BC[0] * CD[1]) - (BC[1] * CD[0])))
n1 = numpy.array(n1)
n2 = numpy.array(n2)
# normalize normal vectors
n1 /= numpy.sqrt(n1.dot(n1))
n2 /= numpy.sqrt(n2.dot(n2))
# angle between normals
cosine = n1.dot(n2)
    try:
        torsion = math.acos(cosine)
    except ValueError:
        # floating-point rounding can push cosine slightly outside [-1, 1];
        # truncating toward zero clamps it back into the acos domain
        cosine = int(cosine)
        torsion = math.acos(cosine)
    # convert radians to degrees
torsion = torsion * 180.0 / math.pi
# find if the torsion is clockwise or counterclockwise
# if numpy.sum(n1 * CD) < 0.0:
if numpy.dot(n1, CD) < 0.0:
torsion = 360 - torsion
if torsion == 360.0:
torsion = 0.0
# get range -180 / +180
if torsion > 180.0:
torsion = torsion - 360
if torsion < -180.0:
torsion = torsion + 360
return torsion
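# --- Illustrative sketch (not part of PBxplore): reproducing the docstring
# example above to document the sign convention of get_dihedral().
if __name__ == '__main__':
    atomA = (-1.918, -6.429, -7.107)
    atomB = (-2.609, -5.125, -7.305)
    atomC = (-4.108, -5.392, -7.331)
    atomD = (-4.469, -6.494, -7.911)
    print(get_dihedral(atomA, atomB, atomC, atomD))  # approx. -36.894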
|
jbarnoud/PBxplore
|
pbxplore/structure/structure.py
|
Python
|
mit
| 11,538
|
[
"MDAnalysis"
] |
de91ed771397a8374a32efec72ce0fd9d3e16ffb0439b7b89adcf6e076db190b
|
__author__ = 'Harsh Daftary'
try:
import requests
import json
except ImportError:
print("requests and json libraries are required, but not found.")
exit(1)
from functools import wraps
class ApiError(Exception):
pass
class GoDebianApi(object):
def __init__(self, host="http://go.debian.net/"):
"""
        :param host: by default go.debian.net is used for generating preview and short URLs;
        pass host = "http://deb.li/" if you want that format.
        The JSON API endpoint is http://deb.li/rpc/json; to change it,
        subclass this class and override __init__.
        :return: None
"""
self.api_url = "http://deb.li/rpc/json"
self.host = host
self.preview = host + "p/%s"
self.headers = {'Content-type': 'application/json'}
self.check_ip_white_list()
def _api_call(func):
@wraps(func)
def _tmp(self, *args, **kwargs):
function_name = func.__name__
data = {'method': function_name, 'params': args, 'id': "jsonrpc"}
r = requests.post(self.api_url, headers=self.headers, data=json.dumps(data))
#print(r.status_code)
if r.status_code == 200:
resp = r.json()
if resp.get('result', False):
return self.host + resp.get('result')
else:
raise ApiError(resp.get('error', "Some error occurred"))
else:
raise ApiError("May be your host is not whitelisted in the api, visit https://wiki.debian.org/deb.li for more details.")
return _tmp
@_api_call
def add_url(self, url):
"""
        :param url: URL to shorten; repeated URLs don't get different keys.
        :return str: shortened URL
"""
pass
def get_preview_url(self, key):
"""
        :param key: key for which to build the preview URL
        :return: str
"""
return self.preview % key
@_api_call
def get_url(self, key):
"""
        :param key: key whose associated URL should be returned;
        keys come from URLs of the form http://deb.li/p/<key>
        :return str: associated URL
"""
pass
@_api_call
def add_static_url(self, url, keyword):
"""
:param url: Url to be shortened
:param keyword: Static keyword against which url needs to be stored
example : go.debian.net/<keyword>
:return:
"""
pass
@_api_call
def update_static_url(self, url, keyword):
"""
:param url: new url
:param keyword: Static keyword against which url needs to be stored
example : go.debian.net/<keyword>
:return:
"""
    def check_ip_white_list(self):
        """Probe the API once to verify that this host is whitelisted."""
        try:
            _ = self.add_url("http://www.debian.org")
        except Exception as e:
            print("Exception occurred")
            print(e)
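# --- Illustrative usage sketch (assumption: a whitelisted host, see
# https://wiki.debian.org/deb.li; note that the constructor itself already
# performs one add_url() call as a whitelist probe):
#
#   api = GoDebianApi()
#   short = api.add_url("http://www.debian.org")   # e.g. http://go.debian.net/<key>
#   key = short.rsplit("/", 1)[-1]
#   print(api.get_preview_url(key))                # http://go.debian.net/p/<key>
#
# Caveat: as written, the shared _api_call decorator prefixes `host` to every
# RPC result, so get_url(key) returns host + the stored URL rather than the
# URL alone.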
|
ninjatrench/GoDebian_api
|
GoDebian/api.py
|
Python
|
mit
| 3,025
|
[
"VisIt"
] |
ce3b1f3d3c66a93a57024166a9244227c45b48a181648da83b071eace82396bb
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
from friendlist import FriendList
'''
Created on Dec 1, 2011
@author: thygrrr
'''
from PyQt4 import QtCore, QtGui, QtNetwork, QtWebKit
from types import IntType, FloatType, ListType, DictType
from client import logger, ClientState, MUMBLE_URL, WEBSITE_URL, WIKI_URL, \
FORUMS_URL, UNITDB_URL, SUPPORT_URL, TICKET_URL, GAME_PORT_DEFAULT, LOBBY_HOST, \
LOBBY_PORT, LOCAL_REPLAY_PORT, STEAMLINK_URL
import util
import fa
import secondaryServer
import json
import sys
import replays
import time
import os
import random
import notificatation_system as ns
try:
from profile import playerstats
except:
pass
class ClientOutdated(StandardError):
pass
FormClass, BaseClass = util.loadUiType("client/client.ui")
class mousePosition(object):
def __init__(self, parent):
self.parent = parent
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
def computeMousePosition(self, pos):
self.onLeftEdge = pos.x() < 8
self.onRightEdge = pos.x() > self.parent.size().width() - 8
self.onTopEdge = pos.y() < 8
self.onBottomEdge = pos.y() > self.parent.size().height() - 8
self.onTopLeftEdge = self.onTopEdge and self.onLeftEdge
self.onBottomLeftEdge = self.onBottomEdge and self.onLeftEdge
self.onTopRightEdge = self.onTopEdge and self.onRightEdge
self.onBottomRightEdge = self.onBottomEdge and self.onRightEdge
self.onEdges = self.onLeftEdge or self.onRightEdge or self.onTopEdge or self.onBottomEdge
def resetToFalse(self):
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
def isOnEdge(self):
return self.onEdges
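# --- Illustrative note (not in the original): computeMousePosition() treats
# an 8 px band along each side of the frameless window as a resize handle.
# For example, a hover at QtCore.QPoint(3, 3) sets onTopLeftEdge, which
# updateCursorShape() in ClientWindow maps to the diagonal resize cursor.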
class ClientWindow(FormClass, BaseClass):
'''
This is the main lobby client that manages the FAF-related connection and data,
in particular players, games, ranking, etc.
Its UI also houses all the other UIs for the sub-modules.
'''
topWidget = QtGui.QWidget()
#These signals are emitted when the client is connected or disconnected from FAF
connected = QtCore.pyqtSignal()
disconnected = QtCore.pyqtSignal()
    #This signal is emitted when the client is done resizing
doneresize = QtCore.pyqtSignal()
#These signals notify connected modules of game state changes (i.e. reasons why FA is launched)
viewingReplay = QtCore.pyqtSignal(QtCore.QUrl)
#Game state controls
gameEnter = QtCore.pyqtSignal()
gameExit = QtCore.pyqtSignal()
#These signals propagate important client state changes to other modules
statsInfo = QtCore.pyqtSignal(dict)
tourneyTypesInfo = QtCore.pyqtSignal(dict)
tutorialsInfo = QtCore.pyqtSignal(dict)
tourneyInfo = QtCore.pyqtSignal(dict)
modInfo = QtCore.pyqtSignal(dict)
gameInfo = QtCore.pyqtSignal(dict)
modVaultInfo = QtCore.pyqtSignal(dict)
coopInfo = QtCore.pyqtSignal(dict)
newGame = QtCore.pyqtSignal(str)
avatarList = QtCore.pyqtSignal(list)
playerAvatarList = QtCore.pyqtSignal(dict)
usersUpdated = QtCore.pyqtSignal(list)
localBroadcast = QtCore.pyqtSignal(str, str)
publicBroadcast = QtCore.pyqtSignal(str)
autoJoin = QtCore.pyqtSignal(list)
channelsUpdated = QtCore.pyqtSignal(list)
featuredModManager = QtCore.pyqtSignal(str)
featuredModManagerInfo = QtCore.pyqtSignal(dict)
replayVault = QtCore.pyqtSignal(dict)
coopLeaderBoard = QtCore.pyqtSignal(dict)
ladderMapsList = QtCore.pyqtSignal(dict)
#These signals are emitted whenever a certain tab is activated
showReplays = QtCore.pyqtSignal()
showMaps = QtCore.pyqtSignal()
showGames = QtCore.pyqtSignal()
showTourneys = QtCore.pyqtSignal()
showLadder = QtCore.pyqtSignal()
showChat = QtCore.pyqtSignal()
showGalaxyWar = QtCore.pyqtSignal()
showMods = QtCore.pyqtSignal()
showCoop = QtCore.pyqtSignal()
joinGameFromUser = QtCore.pyqtSignal(str)
joinReplayFromUser = QtCore.pyqtSignal(str)
joinGameFromURL = QtCore.pyqtSignal(str)
joinReplayFromURL = QtCore.pyqtSignal(str)
# for the auto join ranked
rankedGameAeon = QtCore.pyqtSignal(bool)
rankedGameCybran = QtCore.pyqtSignal(bool)
rankedGameSeraphim = QtCore.pyqtSignal(bool)
rankedGameUEF = QtCore.pyqtSignal(bool)
rankedGameRandom = QtCore.pyqtSignal(bool)
def __init__(self, *args, **kwargs):
BaseClass.__init__(self, *args, **kwargs)
logger.debug("Client instantiating")
# Hook to Qt's application management system
QtGui.QApplication.instance().aboutToQuit.connect(self.cleanup)
#Init and wire the TCP Network socket to communicate with faforever.com
self.socket = QtNetwork.QTcpSocket()
self.socket.readyRead.connect(self.readFromServer)
self.socket.disconnected.connect(self.disconnectedFromServer)
self.socket.error.connect(self.socketError)
self.blockSize = 0
self.uniqueId = None
self.udpTest = False
try:
self.profile = playerstats.Statpage(self)
except:
pass
self.sendFile = False
self.progress = QtGui.QProgressDialog()
self.progress.setMinimum(0)
self.progress.setMaximum(0)
#Tray icon
self.tray = QtGui.QSystemTrayIcon()
self.tray.setIcon(util.icon("client/tray_icon.png"))
self.tray.show()
self.state = ClientState.NONE
self.session = None
#Timer for resize events
self.resizeTimer = QtCore.QTimer(self)
self.resizeTimer.timeout.connect(self.resized)
self.preferedSize = 0
#Process used to run Forged Alliance (managed in module fa)
fa.exe.instance.started.connect(self.startedFA)
fa.exe.instance.finished.connect(self.finishedFA)
fa.exe.instance.error.connect(self.errorFA)
self.gameInfo.connect(fa.exe.instance.processGameInfo)
#Local Replay Server (and relay)
self.replayServer = fa.replayserver.ReplayServer(self)
#Local Relay Server
self.relayServer = fa.relayserver.RelayServer(self)
#Local proxy servers
self.proxyServer = fa.proxies.proxies(self)
#stat server
self.statsServer = secondaryServer.SecondaryServer("Statistic", 11002, self)
#create user interface (main window) and load theme
self.setupUi(self)
self.setStyleSheet(util.readstylesheet("client/client.css"))
self.windowsTitleLabel = QtGui.QLabel(self)
self.windowsTitleLabel.setText("FA Forever " + util.VERSION_STRING)
self.windowsTitleLabel.setProperty("titleLabel", True)
self.setWindowTitle("FA Forever " + util.VERSION_STRING)
# Frameless
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinimizeButtonHint)
self.rubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle)
self.mousePosition = mousePosition(self)
self.installEventFilter(self)
self.minimize = QtGui.QToolButton(self)
self.minimize.setIcon(util.icon("client/minimize-button.png"))
self.maximize = QtGui.QToolButton(self)
self.maximize.setIcon(util.icon("client/maximize-button.png"))
close = QtGui.QToolButton(self)
close.setIcon(util.icon("client/close-button.png"))
self.minimize.setMinimumHeight(10)
close.setMinimumHeight(10)
self.maximize.setMinimumHeight(10)
close.setIconSize(QtCore.QSize(22, 22))
self.minimize.setIconSize(QtCore.QSize(22, 22))
self.maximize.setIconSize(QtCore.QSize(22, 22))
close.setProperty("windowControlBtn", True)
self.maximize.setProperty("windowControlBtn", True)
self.minimize.setProperty("windowControlBtn", True)
self.menu = self.menuBar()
self.topLayout.addWidget(self.menu)
self.topLayout.addWidget(self.windowsTitleLabel)
self.topLayout.addWidget(self.minimize)
self.topLayout.addWidget(self.maximize)
self.topLayout.addWidget(close)
self.topLayout.insertStretch(1, 500)
self.topLayout.setSpacing(0)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.maxNormal = False
        close.clicked.connect(self.close)
self.minimize.clicked.connect(self.showSmall)
self.maximize.clicked.connect(self.showMaxRestore)
self.moving = False
self.dragging = False
self.draggingHover = False
self.offset = None
self.curSize = None
sizeGrip = QtGui.QSizeGrip(self)
self.mainGridLayout.addWidget(sizeGrip, 2, 2)
#Wire all important signals
self.mainTabs.currentChanged.connect(self.mainTabChanged)
self.topTabs.currentChanged.connect(self.vaultTabChanged)
#Verrry important step!
self.loadSettingsPrelogin()
self.players = {} # Player names known to the client, contains the player_info messages sent by the server
self.urls = {} # user game location URLs - TODO: Should go in self.players
self.friends = [] # names of the client's friends
self.foes = [] # names of the client's foes
self.power = 0 # current user power
self.email = None
self.coloredNicknames = False
#Initialize the Menu Bar according to settings etc.
self.initMenus()
#Load the icons for the tabs
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.whatNewTab), util.icon("client/feed.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.chatTab), util.icon("client/chat.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.gamesTab), util.icon("client/games.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.coopTab), util.icon("client/coop.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.vaultsTab), util.icon("client/mods.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.galacticwarTab), util.icon("client/gw.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.ladderTab), util.icon("client/ladder.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tourneyTab), util.icon("client/tourney.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.livestreamTab), util.icon("client/twitch.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.replaysTab), util.icon("client/replays.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tutorialsTab), util.icon("client/tutorials.png"))
QtWebKit.QWebSettings.globalSettings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
#for moderator
self.modMenu = None
def eventFilter(self, obj, event):
if (event.type() == QtCore.QEvent.HoverMove):
if self.dragging:
self.draggingHover = True
self.resizeWidget(self.mapToGlobal(event.pos()))
else:
self.draggingHover = False
if self.maxNormal == False:
self.mousePosition.computeMousePosition(event.pos())
else:
self.mousePosition.resetToFalse()
self.updateCursorShape(event.pos())
return False
def updateCursorShape(self, pos):
if self.mousePosition.onTopLeftEdge or self.mousePosition.onBottomRightEdge:
self.mousePosition.cursorShapeChange = True
self.setCursor(QtCore.Qt.SizeFDiagCursor)
elif self.mousePosition.onTopRightEdge or self.mousePosition.onBottomLeftEdge:
self.setCursor(QtCore.Qt.SizeBDiagCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onLeftEdge or self.mousePosition.onRightEdge:
self.setCursor(QtCore.Qt.SizeHorCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onTopEdge or self.mousePosition.onBottomEdge:
self.setCursor(QtCore.Qt.SizeVerCursor)
self.mousePosition.cursorShapeChange = True
else:
if self.mousePosition.cursorShapeChange == True:
self.unsetCursor()
self.mousePosition.cursorShapeChange = False
def showSmall(self):
self.showMinimized()
def showMaxRestore(self):
if(self.maxNormal):
self.maxNormal = False
if self.curSize:
self.setGeometry(self.curSize)
else:
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
def mouseDoubleClickEvent(self, event):
self.showMaxRestore()
def mouseReleaseEvent(self, event):
self.dragging = False
self.moving = False
if self.rubberBand.isVisible():
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(self.rubberBand.geometry())
self.rubberBand.hide()
#self.showMaxRestore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
if self.mousePosition.isOnEdge() and self.maxNormal == False:
self.dragging = True
return
else :
self.dragging = False
self.moving = True
self.offset = event.pos()
def mouseMoveEvent(self, event):
if self.dragging and self.draggingHover == False:
self.resizeWidget(event.globalPos())
        elif self.moving and self.offset is not None:
desktop = QtGui.QDesktopWidget().availableGeometry(self)
if event.globalPos().y() == 0:
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == 0:
desktop.setRight(desktop.right() / 2.0)
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == desktop.right():
desktop.setRight(desktop.right() / 2.0)
desktop.moveLeft(desktop.right())
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
else:
self.rubberBand.hide()
if self.maxNormal == True:
self.showMaxRestore()
self.move(event.globalPos() - self.offset)
def resizeWidget(self, globalMousePos):
if globalMousePos.y() == 0:
self.rubberBand.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
self.rubberBand.show()
else:
self.rubberBand.hide()
origRect = self.frameGeometry()
left, top, right, bottom = origRect.getCoords()
minWidth = self.minimumWidth()
minHeight = self.minimumHeight()
if self.mousePosition.onTopLeftEdge:
left = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomLeftEdge:
            left = globalMousePos.x()
            bottom = globalMousePos.y()
elif self.mousePosition.onTopRightEdge:
right = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomRightEdge:
right = globalMousePos.x()
bottom = globalMousePos.y()
elif self.mousePosition.onLeftEdge:
left = globalMousePos.x()
elif self.mousePosition.onRightEdge:
right = globalMousePos.x()
elif self.mousePosition.onTopEdge:
top = globalMousePos.y()
elif self.mousePosition.onBottomEdge:
bottom = globalMousePos.y()
newRect = QtCore.QRect(QtCore.QPoint(left, top), QtCore.QPoint(right, bottom))
if newRect.isValid():
if minWidth > newRect.width():
if left != origRect.left() :
newRect.setLeft(origRect.left())
else:
newRect.setRight(origRect.right())
if minHeight > newRect.height() :
if top != origRect.top():
newRect.setTop(origRect.top())
else:
newRect.setBottom(origRect.bottom())
self.setGeometry(newRect)
def setup(self):
import chat
import tourneys
import stats
import vault
import games
import tutorials
import featuredmods
import galacticWar
import downloadManager
import modvault
import coop
from chat._avatarWidget import avatarWidget
#download manager
self.downloader = downloadManager.downloadManager(self)
# Initialize chat
self.chat = chat.Lobby(self)
#build main window with the now active client
self.ladder = stats.Stats(self)
self.games = games.Games(self)
self.tourneys = tourneys.Tourneys(self)
self.vault = vault.MapVault(self)
self.modvault = modvault.ModVault(self)
self.replays = replays.Replays(self)
self.tutorials = tutorials.Tutorials(self)
self.GalacticWar = galacticWar.Lobby(self)
self.Coop = coop.Coop(self)
self.notificationSystem = ns.NotificationSystem(self)
self.friendList = FriendList(self)
        # fires too often
#self.usersUpdated.connect(self.friendList.updateFriendList)
# set menu states
self.actionNsEnabled.setChecked(self.notificationSystem.settings.enabled)
# Other windows
self.featuredMods = featuredmods.FeaturedMods(self)
self.avatarAdmin = self.avatarSelection = avatarWidget(self, None)
# warning setup
self.warning = QtGui.QHBoxLayout()
self.warnPlayer = QtGui.QLabel(self)
self.warnPlayer.setText("A player of your skill level is currently searching for a 1v1 game. Click a faction to join him! ")
        self.warnPlayer.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.warnPlayer.setProperty("warning", True)
self.rankedAeon = QtGui.QToolButton(self)
self.rankedCybran = QtGui.QToolButton(self)
self.rankedSeraphim = QtGui.QToolButton(self)
self.rankedUEF = QtGui.QToolButton(self)
self.rankedRandom = QtGui.QToolButton(self)
# self.rankedAeon.setAutoRaise(0)
# self.rankedCybran.setAutoRaise(0)
# self.rankedSeraphim.setAutoRaise(0)
# self.rankedUEF.setAutoRaise(0)
# self.rankedRandom.setAutoRaise(0)
self.rankedAeon.setMaximumSize(25, 25)
self.rankedCybran.setMaximumSize(25, 25)
self.rankedSeraphim.setMaximumSize(25, 25)
self.rankedUEF.setMaximumSize(25, 25)
self.rankedRandom.setMaximumSize(25, 25)
self.rankedAeon.setIcon(util.icon("games/automatch/aeon.png"))
self.rankedCybran.setIcon(util.icon("games/automatch/cybran.png"))
self.rankedSeraphim.setIcon(util.icon("games/automatch/seraphim.png"))
self.rankedUEF.setIcon(util.icon("games/automatch/uef.png"))
self.rankedRandom.setIcon(util.icon("games/automatch/random.png"))
self.warning.addStretch()
self.warning.addWidget(self.warnPlayer)
self.warning.addWidget(self.rankedUEF)
self.warning.addWidget(self.rankedCybran)
self.warning.addWidget(self.rankedAeon)
self.warning.addWidget(self.rankedSeraphim)
self.warning.addWidget(self.rankedRandom)
self.warning.addStretch()
self.mainGridLayout.addLayout(self.warning, 2, 0)
self.rankedAeon.clicked.connect(self.rankedGameAeon)
self.rankedCybran.clicked.connect(self.rankedGameCybran)
self.rankedSeraphim.clicked.connect(self.rankedGameSeraphim)
self.rankedUEF.clicked.connect(self.rankedGameUEF)
self.rankedRandom.clicked.connect(self.rankedGameRandom)
self.warningHide()
def show(self):
super(FormClass, self).show()
if self.friendList.enabled:
self.friendList.dialog.show()
def warningHide(self):
'''
hide the warning bar for matchmaker
'''
self.warnPlayer.hide()
self.rankedUEF.hide()
self.rankedAeon.hide()
self.rankedCybran.hide()
self.rankedSeraphim.hide()
self.rankedRandom.hide()
def warningShow(self):
'''
show the warning bar for matchmaker
'''
self.warnPlayer.show()
self.rankedUEF.show()
self.rankedAeon.show()
self.rankedCybran.show()
self.rankedSeraphim.show()
self.rankedRandom.show()
@QtCore.pyqtSlot()
def cleanup(self):
'''
Perform cleanup before the UI closes
'''
self.state = ClientState.SHUTDOWN
self.progress.setWindowTitle("FAF is shutting down")
self.progress.setMinimum(0)
self.progress.setMaximum(0)
self.progress.setValue(0)
self.progress.setCancelButton(None)
self.progress.show()
#Important: If a game is running, offer to terminate it gently
self.progress.setLabelText("Closing ForgedAlliance.exe")
fa.exe.close()
#Terminate Lobby Server connection
if self.socket.state() == QtNetwork.QTcpSocket.ConnectedState:
self.progress.setLabelText("Closing main connection.")
self.socket.disconnectFromHost()
# Clear UPnP Mappings...
if self.useUPnP:
self.progress.setLabelText("Removing UPnP port mappings")
fa.upnp.removePortMappings()
#Terminate local ReplayServer
if self.replayServer:
self.progress.setLabelText("Terminating local replay server")
self.replayServer.close()
self.replayServer = None
#Terminate local ReplayServer
if self.relayServer:
self.progress.setLabelText("Terminating local relay server")
self.relayServer.close()
self.relayServer = None
#Clean up Chat
if self.chat:
self.progress.setLabelText("Disconnecting from IRC")
self.chat.disconnect()
self.chat = None
# Get rid of the Tray icon
if self.tray:
self.progress.setLabelText("Removing System Tray icon")
self.tray.deleteLater()
self.tray = None
#Terminate UI
if self.isVisible():
self.progress.setLabelText("Closing main window")
self.close()
self.progress.close()
def closeEvent(self, event):
logger.info("Close Event for Application Main Window")
self.saveWindow()
if (fa.exe.running()):
if QtGui.QMessageBox.question(self, "Are you sure?", "Seems like you still have Forged Alliance running!<br/><b>Close anyway?</b>", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
event.ignore()
return
return QtGui.QMainWindow.closeEvent(self, event)
def resizeEvent(self, size):
self.resizeTimer.start(400)
def resized(self):
self.resizeTimer.stop()
self.doneresize.emit()
def initMenus(self):
self.actionLinkMumble.triggered.connect(self.linkMumble)
self.actionLink_account_to_Steam.triggered.connect(self.linkToSteam)
self.actionLinkWebsite.triggered.connect(self.linkWebsite)
self.actionLinkWiki.triggered.connect(self.linkWiki)
self.actionLinkForums.triggered.connect(self.linkForums)
self.actionLinkUnitDB.triggered.connect(self.linkUnitDB)
self.actionNsSettings.triggered.connect(lambda : self.notificationSystem.on_showSettings())
self.actionNsEnabled.triggered.connect(lambda enabled : self.notificationSystem.setNotificationEnabled(enabled))
self.actionWiki.triggered.connect(self.linkWiki)
self.actionReportBug.triggered.connect(self.linkReportBug)
self.actionShowLogs.triggered.connect(self.linkShowLogs)
self.actionTechSupport.triggered.connect(self.linkTechSupport)
self.actionAbout.triggered.connect(self.linkAbout)
self.actionClearCache.triggered.connect(self.clearCache)
self.actionClearSettings.triggered.connect(self.clearSettings)
self.actionClearGameFiles.triggered.connect(self.clearGameFiles)
self.actionTestingConnections.triggered.connect(self.runTesting)
self.actionSetGamePath.triggered.connect(self.switchPath)
self.actionSetGamePort.triggered.connect(self.switchPort)
self.actionSetMumbleOptions.triggered.connect(self.setMumbleOptions)
#Toggle-Options
self.actionSetAutoLogin.triggered.connect(self.updateOptions)
self.actionSetSoundEffects.triggered.connect(self.updateOptions)
self.actionSetOpenGames.triggered.connect(self.updateOptions)
self.actionSetJoinsParts.triggered.connect(self.updateOptions)
self.actionSetAutoPostJoin.triggered.connect(self.updateOptions)
self.actionSetLiveReplays.triggered.connect(self.updateOptions)
self.actionSaveGamelogs.triggered.connect(self.updateOptions)
self.actionColoredNicknames.triggered.connect(self.updateOptions)
self.actionActivateMumbleSwitching.triggered.connect(self.saveMumbleSwitching)
#Init themes as actions.
themes = util.listThemes()
for theme in themes:
action = self.menuTheme.addAction(str(theme))
action.triggered.connect(self.switchTheme)
action.theme = theme
action.setCheckable(True)
if util.getTheme() == theme:
action.setChecked(True)
# Nice helper for the developers
self.menuTheme.addSeparator()
self.menuTheme.addAction("Reload Stylesheet", lambda: self.setStyleSheet(util.readstylesheet("client/client.css")))
@QtCore.pyqtSlot()
def updateOptions(self):
self.autologin = self.actionSetAutoLogin.isChecked()
self.soundeffects = self.actionSetSoundEffects.isChecked()
self.opengames = self.actionSetOpenGames.isChecked()
self.joinsparts = self.actionSetJoinsParts.isChecked()
self.autopostjoin = self.actionSetAutoPostJoin.isChecked()
self.livereplays = self.actionSetLiveReplays.isChecked()
self.gamelogs = self.actionSaveGamelogs.isChecked()
self.coloredNicknames = self.actionColoredNicknames.isChecked()
self.saveChat()
self.saveCredentials()
@QtCore.pyqtSlot()
def switchTheme(self):
util.setTheme(self.sender().theme, True)
@QtCore.pyqtSlot()
def switchPath(self):
fa.updater.Wizard(self).exec_()
@QtCore.pyqtSlot()
def switchPort(self):
import loginwizards
loginwizards.gameSettingsWizard(self).exec_()
@QtCore.pyqtSlot()
def linkToSteam(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(STEAMLINK_URL))
@QtCore.pyqtSlot()
def setMumbleOptions(self):
import loginwizards
loginwizards.mumbleOptionsWizard(self).exec_()
@QtCore.pyqtSlot()
def clearSettings(self):
result = QtGui.QMessageBox.question(None, "Clear Settings", "Are you sure you wish to clear all settings, login info, etc. used by this program?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if (result == QtGui.QMessageBox.Yes):
util.settings.clear()
util.settings.sync()
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def clearGameFiles(self):
util.clearDirectory(util.BIN_DIR)
util.clearDirectory(util.GAMEDATA_DIR)
@QtCore.pyqtSlot()
def clearCache(self):
changed = util.clearDirectory(util.CACHE_DIR)
if changed:
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def linkMumble(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(MUMBLE_URL.format(login=self.login)))
@QtCore.pyqtSlot()
def linkWebsite(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(WEBSITE_URL))
@QtCore.pyqtSlot()
def linkWiki(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(WIKI_URL))
@QtCore.pyqtSlot()
def linkForums(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(FORUMS_URL))
@QtCore.pyqtSlot()
def linkUnitDB(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(UNITDB_URL))
@QtCore.pyqtSlot()
def linkReportBug(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(TICKET_URL))
#from util.report import ReportDialog
#ReportDialog(self).show()
@QtCore.pyqtSlot()
def linkTechSupport(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(SUPPORT_URL))
@QtCore.pyqtSlot()
def linkShowLogs(self):
util.showInExplorer(util.LOG_DIR)
@QtCore.pyqtSlot()
def linkAbout(self):
dialog = util.loadUi("client/about.ui")
dialog.exec_()
def saveCredentials(self):
util.settings.beginGroup("user")
util.settings.setValue("user/remember", self.remember) #always remember to remember
if self.remember:
util.settings.setValue("user/login", self.login)
util.settings.setValue("user/password", self.password)
util.settings.setValue("user/autologin", self.autologin) #only autologin if remembering
else:
util.settings.setValue("user/login", None)
util.settings.setValue("user/password", None)
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def clearAutologin(self):
self.autologin = False
self.actionSetAutoLogin.setChecked(False)
util.settings.beginGroup("user")
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def saveWindow(self):
util.settings.beginGroup("window")
util.settings.setValue("geometry", self.saveGeometry())
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/falogs", self.gamelogs)
util.settings.endGroup()
def savePort(self):
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/gameport", self.gamePort)
util.settings.setValue("app/upnp", self.useUPnP)
util.settings.endGroup()
util.settings.sync()
def saveMumble(self):
util.settings.beginGroup("Mumble")
util.settings.setValue("app/mumble", self.enableMumble)
util.settings.endGroup()
util.settings.sync()
def saveMumbleSwitching(self):
self.activateMumbleSwitching = self.actionActivateMumbleSwitching.isChecked()
util.settings.beginGroup("Mumble")
util.settings.setValue("app/activateMumbleSwitching", self.activateMumbleSwitching)
util.settings.endGroup()
util.settings.sync()
@QtCore.pyqtSlot()
def saveChat(self):
util.settings.beginGroup("chat")
util.settings.setValue("soundeffects", self.soundeffects)
util.settings.setValue("livereplays", self.livereplays)
util.settings.setValue("opengames", self.opengames)
util.settings.setValue("joinsparts", self.joinsparts)
util.settings.setValue("autopostjoin", self.autopostjoin)
util.settings.setValue("coloredNicknames", self.coloredNicknames)
util.settings.endGroup()
@QtCore.pyqtSlot(bool)
def on_actionFriendlist_toggled(self, checked):
util.settings.beginGroup("friendlist")
util.settings.setValue("enabled", checked)
util.settings.endGroup()
self.friendList.dialog.setVisible(checked)
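    # NOTE (assumption based on the comparisons below): QSettings round-trips
    # Python booleans as the strings "true"/"false" on some backends, which is
    # why the load methods compare the stored value against the string "true"
    # rather than against the boolean True, e.g.:
    #   self.remember = (util.settings.value("user/remember") == "true")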
def loadSettingsPrelogin(self):
util.settings.beginGroup("user")
self.login = util.settings.value("user/login")
self.password = util.settings.value("user/password")
self.remember = (util.settings.value("user/remember") == "true")
# This is the new way we do things.
self.autologin = (util.settings.value("user/autologin") == "true")
self.actionSetAutoLogin.setChecked(self.autologin)
util.settings.endGroup()
def loadSettings(self):
#Load settings
fa.loadPath()
util.settings.beginGroup("window")
geometry = util.settings.value("geometry", None)
if geometry:
self.restoreGeometry(geometry)
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
self.gamePort = int(util.settings.value("app/gameport", GAME_PORT_DEFAULT))
self.useUPnP = (util.settings.value("app/upnp", "false") == "true")
self.gamelogs = (util.settings.value("app/falogs", "false") == "true")
self.actionSaveGamelogs.setChecked(self.gamelogs)
util.settings.endGroup()
util.settings.beginGroup("Mumble")
if util.settings.value("app/mumble", "firsttime") == "firsttime":
# The user has never configured mumble before. Be a little intrusive and ask him if he wants to use it.
if QtGui.QMessageBox.question(self, "Enable Voice Connector?", "FA Forever can connect with <a href=\"http://mumble.sourceforge.net/\">Mumble</a> to support the automatic setup of voice connections between you and your team mates. Would you like to enable this feature? You can change the setting at any time by going to options -> settings -> Voice", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
util.settings.setValue("app/mumble", "true")
else:
util.settings.setValue("app/mumble", "false")
if util.settings.value("app/activateMumbleSwitching", "firsttime") == "firsttime":
util.settings.setValue("app/activateMumbleSwitching", "true")
self.enableMumble = (util.settings.value("app/mumble", "false") == "true")
self.activateMumbleSwitching = (util.settings.value("app/activateMumbleSwitching", "false") == "true")
util.settings.endGroup()
self.actionActivateMumbleSwitching.setChecked(self.activateMumbleSwitching)
self.loadChat()
def loadChat(self):
try:
util.settings.beginGroup("chat")
self.soundeffects = (util.settings.value("soundeffects", "true") == "true")
self.opengames = (util.settings.value("opengames", "true") == "true")
self.joinsparts = (util.settings.value("joinsparts", "false") == "true")
self.livereplays = (util.settings.value("livereplays", "true") == "true")
self.autopostjoin = (util.settings.value("autopostjoin", "true") == "true")
self.coloredNicknames = (util.settings.value("coloredNicknames", "false") == "true")
util.settings.endGroup()
self.actionColoredNicknames.setChecked(self.coloredNicknames)
self.actionSetSoundEffects.setChecked(self.soundeffects)
self.actionSetLiveReplays.setChecked(self.livereplays)
self.actionSetOpenGames.setChecked(self.opengames)
self.actionSetJoinsParts.setChecked(self.joinsparts)
self.actionSetAutoPostJoin.setChecked(self.autopostjoin)
except:
pass
def processTestGameportDatagram(self):
self.udpTest = True
def testGamePort(self):
'''
Here, we test with the server if the current game port set is all right.
If not, we propose alternatives to the user
'''
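        # Sketch of the test sequence implemented below:
        #   1. bind a local UDP socket to the configured game port
        #   2. send a datagram containing our login to the lobby server
        #   3. poll for up to ~10 seconds, re-sending at a widening interval,
        #      until processTestGameportDatagram() sets self.udpTest = True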
if self.useUPnP:
fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")
#binding the port
udpSocket = QtNetwork.QUdpSocket(self)
udpSocket.bind(self.gamePort)
udpSocket.readyRead.connect(self.processTestGameportDatagram)
if udpSocket.localPort() != self.gamePort :
logger.error("The game port set (%i) is not available." % self.gamePort)
        answer = QtGui.QMessageBox.warning(None, "Port Occupied", "FAF has detected that the game port you chose is not available. Possible reasons:<ul><li><b>FAF is already running</b> (most likely)</li><li>another program is listening on port {port}</li></ul><br>If you click Apply, FAF will use port {port2} for this session.".format(port=self.gamePort, port2=udpSocket.localPort()), QtGui.QMessageBox.Apply, QtGui.QMessageBox.Abort)
if answer == QtGui.QMessageBox.Apply:
self.gamePort = udpSocket.localPort()
else :
udpSocket.close()
udpSocket.deleteLater()
return False
logger.info("The game port is now set to %i" % self.gamePort)
#now we try sending a packet to the server
logger.info("sending packet to " + LOBBY_HOST)
if udpSocket.writeDatagram(self.login, QtNetwork.QHostAddress(QtNetwork.QHostInfo.fromName(LOBBY_HOST).addresses ()[0]), 30351) == -1 :
logger.info("Unable to send UDP Packet")
QtGui.QMessageBox.critical(self, "UDP Packet not sent !", "We are not able to send a UDP packet. <br><br>Possible reasons:<ul><li><b>Your firewall is blocking the UDP port {port}.</b></li><li><b>Your router is blocking or routing port {port} in a wrong way.</b></li></ul><br><font size='+2'>How to fix this : </font> <ul><li>Check your firewall and router. <b>More info in the wiki (Links -> Wiki)</li></b><li>You should also consider using <b>uPnP (Options -> Settings -> Gameport)</b></li><li>You should ask for assistance in the TechQuestions chat and/or in the <b>technical forum (Links -> Forums<b>)</li></ul><br><font size='+1'><b>FA will not be able to perform correctly until this issue is fixed.</b></font>".format(port=self.gamePort))
self.progress.setCancelButtonText("Cancel")
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(False)
self.progress.setAutoReset(False)
self.progress.setModal(1)
self.progress.setWindowTitle("UDP test...")
self.progress.setLabelText("We are waiting for an UDP answer from the server on port %i." % (self.gamePort))
self.progress.show()
timer = time.time()
interval = 1
        while not self.udpTest:
QtGui.QApplication.processEvents()
if time.time() - timer > interval :
udpSocket.writeDatagram(self.login, QtNetwork.QHostAddress("91.236.254.74"), 30351)
interval = interval + 1
if time.time() - timer > 10 :
break
self.progress.close()
udpSocket.close()
udpSocket.deleteLater()
        if not self.udpTest:
logger.info("Unable to receive UDP Packet")
QtGui.QMessageBox.critical(self, "UDP Packet not received !", "We didn't received any answer from the server. <br><br>Possible reasons:<ul><li><b>Your firewall is blocking the UDP port {port}.</b></li><li><b>Your router is blocking or routing port {port} in a wrong way/to the wrong computer.</b></li></ul><br><font size='+2'>How to fix this : </font> <ul><li>Check your firewall and router. <b>More info in the wiki (Links -> Wiki)</li></b><li>You should also consider using <b>uPnP (Options -> Settings -> Gameport)</b></li><li>You should ask for assistance in the TechQuestions chat and/or in the <b>technical forum (Links -> Forums<b>)</li></ul><br><font size='+1'><b>FA will not be able to perform correctly until this issue is fixed.</b></font>".format(port=self.gamePort))
return True
def doConnect(self):
if not self.replayServer.doListen(LOCAL_REPLAY_PORT):
return False
if not self.relayServer.doListen():
return False
self.progress.setCancelButtonText("Cancel")
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(False)
self.progress.setAutoReset(False)
self.progress.setModal(1)
self.progress.setWindowTitle("Connecting...")
self.progress.setLabelText("Establishing connection ...")
self.progress.show()
# Begin connecting.
self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
while (self.socket.state() != QtNetwork.QAbstractSocket.ConnectedState) and self.progress.isVisible():
QtGui.QApplication.processEvents()
self.state = ClientState.NONE
self.localIP = str(self.socket.localAddress().toString())
# #Perform Version Check first
if not self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
self.progress.close() # in case it was still showing...
# We either cancelled or had a TCP error, meaning the connection failed..
if self.progress.wasCanceled():
logger.warn("doConnect() aborted by user.")
else:
logger.error("doConnect() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
return False
else:
return True
def waitSession(self):
self.progress.setLabelText("Setting up Session...")
self.send(dict(command="ask_session"))
start = time.time()
        while self.session is None and self.progress.isVisible():
QtGui.QApplication.processEvents()
if time.time() - start > 15 :
break
if not self.session :
if self.progress.wasCanceled():
logger.warn("waitSession() aborted by user.")
else :
logger.error("waitSession() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
QtGui.QMessageBox.critical(self, "Notice from Server", "Unable to get a session : <br> Server under maintenance.<br><br>Please retry in some minutes.")
return False
self.uniqueId = util.uniqueID(self.login, self.session)
self.loadSettings()
#
        # Voice connector (This isn't supposed to be here, but I need the settings to be loaded before I can determine if we can hook in the mumbleConnector)
#
if self.enableMumble:
self.progress.setLabelText("Setting up Mumble...")
import mumbleconnector
self.mumbleConnector = mumbleconnector.MumbleConnector(self)
return True
def doLogin(self):
#Determine if a login wizard needs to be displayed and do so
if not self.autologin or not self.password or not self.login:
import loginwizards
if not loginwizards.LoginWizard(self).exec_():
                return False
self.progress.setLabelText("Logging in...")
self.progress.reset()
self.progress.show()
self.login = self.login.strip()
logger.info("Attempting to login as: " + str(self.login))
self.state = ClientState.NONE
if not self.uniqueId :
QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Unable to login", "It seems that you miss some important DLL.<br>Please install :<br><a href =\"http://www.microsoft.com/download/en/confirmation.aspx?id=8328\">http://www.microsoft.com/download/en/confirmation.aspx?id=8328</a> and <a href = \"http://www.microsoft.com/en-us/download/details.aspx?id=17851\">http://www.microsoft.com/en-us/download/details.aspx?id=17851</a><br><br>You probably have to restart your computer after installing them.<br><br>Please visit this link in case of problems : <a href=\"http://www.faforever.com/forums/viewforum.php?f=3\">http://www.faforever.com/forums/viewforum.php?f=3</a>", QtGui.QMessageBox.Close)
return False
else :
self.send(dict(command="hello", version=util.VERSION, login=self.login, password=self.password, unique_id=self.uniqueId, local_ip=self.localIP))
while (not self.state) and self.progress.isVisible():
QtGui.QApplication.processEvents()
if self.progress.wasCanceled():
logger.warn("Login aborted by user.")
return False
self.progress.close()
if self.state == ClientState.OUTDATED :
logger.warn("Client is OUTDATED.")
elif self.state == ClientState.ACCEPTED:
logger.info("Login accepted.")
# update what's new page
self.whatNewsView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=114&username={user}&pwdhash={pwdhash}".format(user=self.login, pwdhash=self.password)))
# live streams
self.LivestreamWebView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=974"))
util.report.BUGREPORT_USER = self.login
util.crash.CRASHREPORT_USER = self.login
if not self.testGamePort() :
return False
#success: save login data (if requested) and carry on
self.actionSetAutoLogin.setChecked(self.autologin)
self.updateOptions()
self.progress.close()
#This is a triumph... I'm making a note here: Huge success!
self.connected.emit()
return True
elif self.state == ClientState.REJECTED:
logger.warning("Login rejected.")
            # seems that there is a bug in a key ..
util.settings.beginGroup("window")
util.settings.remove("geometry")
util.settings.endGroup()
self.clearAutologin()
return self.doLogin() #Just try to login again, slightly hackish but I can get away with it here, I guess.
else:
            # A more profound error has occurred (cancellation or disconnection)
return False
def loginCreation(self, result):
'''
Simply acknowledges the answer the server gave to our account creation attempt,
and sets the client's state accordingly so the Account Creation Wizard
can continue its work.
'''
logger.debug("Account name free and valid: " + result)
if result == "yes" :
self.state = ClientState.CREATED
else:
self.state = ClientState.REJECTED
def isFriend(self, name):
'''
Convenience function for other modules to inquire about a user's friendliness.
'''
return name in self.friends
def isFoe(self, name):
'''
Convenience function for other modules to inquire about a user's foeliness.
'''
return name in self.foes
def isPlayer(self, name):
'''
Convenience function for other modules to inquire about a user's civilian status.
'''
return name in self.players or name == self.login
#Color table used by the following method
# CAVEAT: This will break if the theme is loaded after the client package is imported
colors = json.loads(util.readfile("client/colors.json"))
randomcolors = json.loads(util.readfile("client/randomcolors.json"))
def getUserClan(self, name):
'''
Returns a user's clan if any
'''
if name in self.players:
if "clan" in self.players[name]:
return self.players[name]["clan"]
return ""
def getCompleteUserName(self, name, html = False):
clan = self.getUserClan(name)
if clan != '':
if html:
return '<b>[%s]</b>%s' % (clan, name)
else:
return '[%s] %s' % (clan, name)
return name
def getUserLeague(self, name):
'''
Returns a user's league if any
'''
if name in self.players:
if "league" in self.players[name] :
return self.players[name]["league"]
return None
def getUserCountry(self, name):
'''
Returns a user's country if any
'''
if name in self.players:
if "country" in self.players[name] :
return self.players[name]["country"]
return None
def getUserAvatar(self, name):
'''
Returns a user's avatar if any
'''
if name in self.players:
return self.players[name]["avatar"]
else:
return None
def getUserColor(self, name):
'''
Returns a user's color depending on their status with relation to the FAF client
'''
if name == self.login:
return self.getColor("self")
elif name in self.friends:
return self.getColor("friend")
elif name in self.foes:
return self.getColor("foe")
elif name in self.players:
if self.coloredNicknames:
return self.getRandomColor(name)
else:
return self.getColor("player")
else:
if self.coloredNicknames:
return self.getRandomColor(name)
else:
return self.getColor("default")
def getRandomColor(self, name):
'''Generate a random color from a name'''
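        # Seeding the (global) RNG with the nickname makes the choice
        # deterministic: the same name always maps to the same entry of
        # randomcolors, at the cost of perturbing the module-wide random state.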
random.seed(name)
return random.choice(self.randomcolors)
def getColor(self, name):
if name in self.colors:
return self.colors[name]
else:
return self.colors["default"]
def getUserRanking(self, name):
'''
Returns a user's ranking (trueskill rating) as a float.
'''
if name in self.players:
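            # Conservative skill estimate: mean minus three standard deviations,
            # rounded to the nearest 100 and clamped at 0. For example,
            # mean=1500, deviation=100 -> round((1500 - 300)/100)*100 = 1200.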
return int(max(0, round((self.players[name]["rating_mean"] - 3 * self.players[name]["rating_deviation"])/100.0)*100))
else:
return None
@QtCore.pyqtSlot()
def startedFA(self):
'''
Slot hooked up to fa.exe.instance when the process has launched.
It will notify other modules through the signal gameEnter().
'''
logger.info("FA has launched in an attached process.")
self.send(dict(command="fa_state", state="on"))
self.gameEnter.emit()
@QtCore.pyqtSlot(int)
def finishedFA(self, exit_code):
'''
Slot hooked up to fa.exe.instance when the process has ended.
It will notify other modules through the signal gameExit().
'''
if not exit_code:
logger.info("FA has finished with exit code: " + str(exit_code))
else:
logger.warn("FA has finished with exit code: " + str(exit_code))
self.send(dict(command="fa_state", state="off"))
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def errorFA(self, error_code):
'''
Slot hooked up to fa.exe.instance when the process has failed to start.
'''
if error_code == 0:
logger.error("FA has failed to start")
QtGui.QMessageBox.critical(self, "Error from FA", "FA has failed to start.")
elif error_code == 1:
logger.error("FA has crashed or killed after starting")
else:
text = "FA has failed to start with error code: " + str(error_code)
logger.error(text)
QtGui.QMessageBox.critical(self, "Error from FA", text)
self.send(dict(command="fa_state", state="off"))
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def mainTabChanged(self, index):
'''
The main visible tab (module) of the client's UI has changed.
In this case, other modules may want to load some data or cease
particularly CPU-intensive interactive functionality.
LATER: This can be rewritten as a simple Signal that each module can then individually connect to.
'''
new_tab = self.mainTabs.widget(index)
if new_tab is self.gamesTab:
self.showGames.emit()
if new_tab is self.chatTab:
self.showChat.emit()
if new_tab is self.replaysTab:
self.showReplays.emit()
if new_tab is self.ladderTab:
self.showLadder.emit()
if new_tab is self.tourneyTab:
self.showTourneys.emit()
if new_tab is self.galacticwarTab:
self.showGalaxyWar.emit()
if new_tab is self.coopTab:
self.showCoop.emit()
@QtCore.pyqtSlot(int)
def vaultTabChanged(self, index):
new_tab = self.topTabs.widget(index)
if new_tab is self.mapsTab:
self.showMaps.emit()
if new_tab is self.modsTab:
self.showMods.emit()
def joinGameFromURL(self, url):
'''
Tries to join the game at the given URL
'''
logger.debug("joinGameFromURL: " + url.toString())
if (fa.exe.available()):
add_mods = []
try:
modstr = url.queryItemValue("mods")
add_mods = json.loads(modstr) # should be a list
except:
logger.info("Couldn't load urlquery value 'mods'")
if fa.exe.check(url.queryItemValue("mod"), url.queryItemValue("map"), sim_mods=add_mods):
self.send(dict(command="game_join", uid=int(url.queryItemValue("uid")), gameport=self.gamePort))
def loginWriteToFaServer(self, action, *args, **kw):
'''
This is a specific method that handles sending Login-related and update-related messages to the server.
'''
self.state = ClientState.NONE
logger.debug("Login Write: " + action)
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.ReadWrite)
out.setVersion(QtCore.QDataStream.Qt_4_2)
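        # Length-prefixed framing: write a 4-byte placeholder first, serialize
        # the payload, then seek back to offset 0 and overwrite the placeholder
        # with the payload size (block.size() - 4). readFromServer() relies on
        # this prefix to know how many bytes belong to one message.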
out.writeUInt32(0)
out.writeQString(action)
for arg in args :
if type(arg) is IntType:
out.writeInt(arg)
elif isinstance(arg, basestring):
out.writeQString(arg)
elif type(arg) is FloatType:
out.writeFloat(arg)
elif type(arg) is ListType:
out.writeQVariantList(arg)
elif type(arg) is DictType:
out.writeQString(json.dumps(arg))
else:
logger.warn("Uninterpreted Data Type: " + str(type(arg)) + " of value: " + str(arg))
out.writeQString(str(arg))
out.device().seek(0)
out.writeUInt32(block.size() - 4)
self.socket.write(block)
QtGui.QApplication.processEvents()
def writeToServer(self, action, *args, **kw):
'''
This method is the workhorse of the client, and is used to send messages, queries and commands to the server.
'''
logger.debug("Client: " + action)
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.ReadWrite)
out.setVersion(QtCore.QDataStream.Qt_4_2)
out.writeUInt32(0)
out.writeQString(action)
out.writeQString(self.login)
out.writeQString(self.session)
for arg in args :
if type(arg) is IntType:
out.writeInt(arg)
elif isinstance(arg, basestring):
out.writeQString(arg)
elif type(arg) is FloatType:
out.writeFloat(arg)
elif type(arg) is ListType:
out.writeQVariantList(arg)
elif type(arg) is DictType:
out.writeQString(json.dumps(arg))
elif type(arg) is QtCore.QFile :
arg.open(QtCore.QIODevice.ReadOnly)
fileDatas = QtCore.QByteArray(arg.readAll())
                # seems that this logger call doesn't work: the message string has
                # no %-placeholder, so the size never shows up in the output,
                # e.g. logger.debug("file size %d", fileDatas.size()) would.
                #logger.debug("file size ", int(fileDatas.size()))
out.writeInt(fileDatas.size())
out.writeRawData(fileDatas)
                # This may take a while. We display the progress bar so the user gets feedback.
self.sendFile = True
self.progress.setLabelText("Sending file to server")
self.progress.setCancelButton(None)
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(True)
self.progress.setMinimum(0)
self.progress.setMaximum(100)
self.progress.setModal(1)
self.progress.setWindowTitle("Uploading in progress")
self.progress.show()
arg.close()
else:
logger.warn("Uninterpreted Data Type: " + str(type(arg)) + " sent as str: " + str(arg))
out.writeQString(str(arg))
out.device().seek(0)
out.writeUInt32(block.size() - 4)
self.bytesToSend = block.size() - 4
self.socket.write(block)
@QtCore.pyqtSlot()
def readFromServer(self):
ins = QtCore.QDataStream(self.socket)
ins.setVersion(QtCore.QDataStream.Qt_4_2)
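        # Messages may arrive fragmented across TCP reads, so this loop first
        # waits until the 4-byte length prefix is available, remembers it in
        # self.blockSize, and then returns early until the full block has
        # arrived before decoding and dispatching the action.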
        while not ins.atEnd():
if self.blockSize == 0:
if self.socket.bytesAvailable() < 4:
return
self.blockSize = ins.readUInt32()
if self.socket.bytesAvailable() < self.blockSize:
return
action = ins.readQString()
self.process(action, ins)
self.blockSize = 0
@QtCore.pyqtSlot()
def disconnectedFromServer(self):
logger.warn("Disconnected from lobby server.")
if self.state == ClientState.ACCEPTED:
QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Disconnected from FAF", "The lobby lost the connection to the FAF server.<br/><b>You might still be able to chat.<br/>To play, try reconnecting a little later!</b>", QtGui.QMessageBox.Close)
#Clear the online users lists
oldplayers = self.players.keys()
self.players = {}
self.urls = {}
self.usersUpdated.emit(oldplayers)
self.disconnected.emit()
self.mainTabs.setCurrentIndex(0)
for i in range(1, self.mainTabs.count()):
self.mainTabs.setTabEnabled(i, False)
self.mainTabs.setTabText(i, "offline")
self.state = ClientState.DROPPED
@QtCore.pyqtSlot(QtNetwork.QAbstractSocket.SocketError)
def socketError(self, error):
logger.error("TCP Socket Error: " + self.socket.errorString())
if self.state > ClientState.NONE: # Positive client states deserve user notification.
QtGui.QMessageBox.critical(None, "TCP Error", "A TCP Connection Error has occurred:<br/><br/><b>" + self.socket.errorString() + "</b>", QtGui.QMessageBox.Close)
@QtCore.pyqtSlot()
def forwardLocalBroadcast(self, source, message):
self.localBroadcast.emit(source, message)
#@QtCore.pyqtSlot()
def forwardPublicBroadcast(self, message):
self.publicBroadcast.emit(message)
def manage_power(self):
''' update the interface accordingly to the power of the user'''
if self.power >= 1 :
            if self.modMenu is None:
self.modMenu = self.menu.addMenu("Administration")
actionAvatar = QtGui.QAction("Avatar manager", self.modMenu)
actionAvatar.triggered.connect(self.avatarManager)
self.modMenu.addAction(actionAvatar)
def requestAvatars(self, personal):
if personal :
self.send(dict(command="avatar", action="list_avatar"))
else :
self.send(dict(command="admin", action="requestavatars"))
def joinChannel(self, user, channel):
        '''Make a user join a channel remotely'''
self.send(dict(command="admin", action="join_channel", users=[user], channel=channel))
def closeFA(self, userToClose):
        '''Close FA remotely'''
self.send(dict(command="admin", action="closeFA", user=userToClose))
def closeLobby(self, userToClose):
        '''Close the lobby remotely'''
self.send(dict(command="admin", action="closelobby", user=userToClose))
def addFriend(self, friend):
'''Adding a new friend by user'''
self.friends.append(friend)
self.send(dict(command="social", friends=self.friends)) #LATER: Use this line instead
#self.writeToServer("ADD_FRIEND", friend)
self.usersUpdated.emit([friend])
self.friendList.addFriend(friend)
def addFoe(self, foe):
'''Adding a new foe by user'''
self.foes.append(foe)
self.send(dict(command="social", foes=self.foes)) #LATER: Use this line instead
#self.writeToServer("ADD_FRIEND", friend)
self.usersUpdated.emit([foe])
def remFriend(self, friend):
'''Removal of a friend by user'''
self.friends.remove(friend)
#self.writeToServer("REMOVE_FRIEND", friend)
self.send(dict(command="social", friends=self.friends)) #LATER: Use this line instead
self.usersUpdated.emit([friend])
self.friendList.removeFriend(friend)
def remFoe(self, foe):
'''Removal of a foe by user'''
self.foes.remove(foe)
#self.writeToServer("REMOVE_FRIEND", friend)
self.send(dict(command="social", foes=self.foes)) #LATER: Use this line instead
self.usersUpdated.emit([foe])
def process(self, action, stream):
logger.debug("Server: " + action)
if action == "PING":
self.writeToServer("PONG")
elif action == "LOGIN_AVAILABLE" :
result = stream.readQString()
name = stream.readQString()
logger.info("LOGIN_AVAILABLE: " + name + " - " + result)
self.loginCreation(result)
elif action == 'ACK' :
bytesWritten = stream.readQString()
logger.debug("Acknowledged %s bytes" % bytesWritten)
            if self.sendFile:
self.progress.setValue(int(bytesWritten) * 100 / self.bytesToSend)
if int(bytesWritten) >= self.bytesToSend :
self.progress.close()
self.sendFile = False
elif action == 'ERROR' :
message = stream.readQString()
data = stream.readQString()
logger.error("Protocol Error, server says: " + message + " - " + data)
elif action == "MESSAGE":
stream.readQString()
stream.readQString()
pass
else:
try:
self.dispatch(json.loads(action))
except:
logger.error("Error dispatching JSON: " + action, exc_info=sys.exc_info())
#
# JSON Protocol v2 Implementation below here
#
def send(self, message):
data = json.dumps(message)
if message["command"] == "hello" :
logger.info("Outgoing JSON Message: login.")
else :
logger.info("Outgoing JSON Message: " + data)
self.writeToServer(data)
def dispatch(self, message):
'''
A fairly pythonic way to process received strings as JSON messages.
'''
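        # Convention: a message {"command": "foo", ...} is routed to
        # self.handle_foo(message); for example {"command": "player_info", ...}
        # ends up in handle_player_info() below.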
# add a delay to the notification system
if 'channels' in message:
self.notificationSystem.disabledStartup = False
try:
if "debug" in message:
logger.info(message['debug'])
if "command" in message:
cmd = "handle_" + message['command']
if hasattr(self, cmd):
getattr(self, cmd)(message)
else:
logger.error("Unknown command for JSON." + message['command'])
raise "StandardError"
else:
logger.debug("No command in message.")
except:
raise #Pass it on to our caller, Malformed Command
def handle_stats(self, message):
self.statsInfo.emit(message)
def handle_welcome(self, message):
if "session" in message :
self.session = str(message["session"])
elif "update" in message :
# fix a problem with Qt.
util.settings.beginGroup("window")
util.settings.remove("geometry")
util.settings.endGroup()
if not util.developer():
logger.warn("Server says that Updating is needed.")
self.progress.close()
self.state = ClientState.OUTDATED
fa.updater.fetchClientUpdate(message["update"])
else:
logger.debug("Skipping update because this is a developer version.")
logger.debug("Login success")
self.state = ClientState.ACCEPTED
else :
self.email = message["email"]
logger.debug("Login success")
self.state = ClientState.ACCEPTED
def handle_game_launch(self, message):
logger.info("Handling game_launch via JSON " + str(message))
silent = False
if 'args' in message:
arguments = message['args']
else:
arguments = []
# Important: This is the race parameter used by ladder search.
if 'mod' in message:
modkey = 'mod'
else:
modkey = 'featured_mod'
# Do some special things depending of the reason of the game launch.
rank = False
galacticWar = False
if 'reason' in message:
if message['reason'] == 'gw' :
rank = True
galacticWar = True
silent = True
if "luatable" in message:
fa.gwgametable.writeTable(message["luatable"], "gwReinforcementList.gw")
if (not fa.exe.check(message[modkey], silent=silent)):
logger.error("Can't play %s without successfully updating Forged Alliance." % message[modkey])
return
# HACK: Ideally, this comes from the server, too. LATER: search_ranked message
if rank :
arguments.append('/rank')
arguments.append(str(self.GalacticWar.rank))
elif message[modkey] == "ladder1v1":
arguments.append(self.games.race)
#Player 1v1 rating
arguments.append('/mean')
arguments.append(str(self.players[self.login]["ladder_rating_mean"]))
arguments.append('/deviation')
arguments.append(str(self.players[self.login]["ladder_rating_deviation"]))
else :
#Player global rating
arguments.append('/mean')
arguments.append(str(self.players[self.login]["rating_mean"]))
arguments.append('/deviation')
arguments.append(str(self.players[self.login]["rating_deviation"]))
        arguments.append('/country') #Add country command line argument - Vicarian
        country = self.getUserCountry(self.login)
        arguments.append(str(country))
clan = self.getUserClan(self.login)
if clan and galacticWar == False:
arguments.append('/clan')
arguments.append(clan)
# Ensure we have the map
if "mapname" in message:
fa.exe.checkMap(message['mapname'], force=True, silent=silent)
if galacticWar:
# in case of GW, we need to alter the scenario for support AIs
if not fa.maps.gwmap(message['mapname']):
logger.error("You don't have the required map.")
return
if "sim_mods" in message:
fa.exe.checkMods(message['sim_mods'])
# Writing a file for options
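        # The generated options.lua is a single Lua table of '0'/'1' flags; for
        # message['options'] == [True, False, True] the file would contain:
        #   Options = { '1', '0', '1' }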
if "options" in message:
filename = os.path.join(util.CACHE_DIR, "options.lua")
options = QtCore.QFile(filename)
options.open(QtCore.QIODevice.WriteOnly | QtCore.QIODevice.Text)
numOpt = 0
options.write("Options = { ")
lenopt = len(message['options'])
            for option in message['options']:
                options.write("'1'" if option == True else "'0'")
                numOpt += 1
                if lenopt != numOpt:
                    options.write(", ")
options.write(" }")
options.close()
#Experimental UPnP Mapper - mappings are removed on app exit
if self.useUPnP:
fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")
version_info = message.get('version_info', {})
version_info['lobby'] = util.VERSION_STRING
info = dict(uid=message['uid'], recorder=self.login, featured_mod=message[modkey], game_time=time.time(), version_info=version_info)
fa.exe.play(info, self.relayServer.serverPort(), self.gamelogs, arguments, galacticWar)
def stopTesting(self, success=False):
self.progress.close()
def runTesting(self):
'''
Performs a running of ForgedAlliance.exe for testing that everything is okay
'''
result = QtGui.QMessageBox.question(None, "Testing Proxies", "This will test if your computer is able to use the proxy server.<br>The proxy server is there to solve connections problems that can't be resolved otherwise.<br>Having it running correctly is extremely important.<br><br>FA will launch AND close automatically.<br><b>Please don't close it yourself.</b><br><br>The test can take up to 60 seconds!<br><br>If all you see when FA is launched is a black screen, you have an incorrect mod. The solution is to check your mods. <br><br>Launch the test?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if result != QtGui.QMessageBox.Yes:
return
self.progress.setWindowTitle("FAF is testing the proxy server")
self.progress.setLabelText("FA will launch and should close shortly after.")
self.progress.setMinimum(0)
self.progress.setMaximum(0)
self.progress.setValue(0)
self.progress.setCancelButton(None)
self.progress.show()
self.relayServer.testingProxy()
info = dict(uid= -1, recorder=self.login, featured_mod="faf", game_time=time.time())
fa.exe.play(info, self.relayServer.serverPort(), True)
started = time.time()
success = True
while self.progress.isVisible():
QtGui.QApplication.processEvents()
if time.time() - started > 60:
success = False
self.progress.close()
self.relayServer.stopTesting()
fa.exe.kill()
if success:
QtGui.QMessageBox.information(self, "Testing Proxy", "Proxy Server is running correctly!")
else:
if len(self.proxyServer.testedPorts) != 11:
nonreported = list(set(self.proxyServer.proxies).difference(self.proxyServer.testedPorts))
errorport = []
for port in nonreported:
errorport.append(self.proxyServer.proxies[port].localPort())
QtGui.QMessageBox.warning(self, "Testing Proxy Failed", "FA was unable to communicate locally with these ports :<br><br>" + "<br>".join(str(x) for x in errorport) + "<br><br>This is most likely due to your firewall blocking these port locally.<br>Please allow these UDP ports for IP 127.0.0.1")
elif len(self.proxyServer.testedLoopback) != 11:
nonreported = list(set(self.proxyServer.proxies).difference(self.proxyServer.testedLoopback))
errorport = []
for port in nonreported:
errorport.append(self.proxyServer.proxies[port].localPort())
QtGui.QMessageBox.warning(self, "Testing Proxy Failed", "The lobby didn't received any data from the proxy server for these ports :<br><br>" + "<br>".join(str(x) for x in errorport) + "<br><br>This is most likely due to your firewall blocking the proxy connection, or the proxy is offline.<br>")
else:
QtGui.QMessageBox.warning(self, "Testing Proxy Failed", "FA was unable to communicate locally with UDP ports 12001 to 12011.<br><br>This is most likely due to your firewall blocking these port locally.<br>Please allow these UDP ports for IP 127.0.0.1")
def handle_coop_info(self, message):
self.coopInfo.emit(message)
def handle_tournament_types_info(self, message):
self.tourneyTypesInfo.emit(message)
def handle_tournament_info(self, message):
self.tourneyInfo.emit(message)
def handle_tutorials_info(self, message):
self.tutorialsInfo.emit(message)
def handle_mod_info(self, message):
self.modInfo.emit(message)
def handle_game_info(self, message):
self.gameInfo.emit(message)
def handle_modvault_list_info(self, message):
modList = message["modList"]
for mod in modList:
self.handle_modvault_info(mod)
def handle_modvault_info(self, message):
self.modVaultInfo.emit(message)
def handle_replay_vault(self, message):
self.replayVault.emit(message)
def handle_coop_leaderboard(self, message):
self.coopLeaderBoard.emit(message)
def handle_ladder_maps(self, message):
self.ladderMapsList.emit(message)
def handle_matchmaker_info(self, message):
if "potential" in message:
if message["potential"] :
self.warningShow()
else:
self.warningHide()
def handle_avatar(self, message):
if "avatarlist" in message :
self.avatarList.emit(message["avatarlist"])
def handle_admin(self, message):
if "avatarlist" in message :
self.avatarList.emit(message["avatarlist"])
elif "player_avatar_list" in message :
self.playerAvatarList.emit(message)
def handle_social(self, message):
if "friends" in message:
self.friends = message["friends"]
self.usersUpdated.emit(self.players.keys())
self.friendList.updateFriendList()
if "foes" in message:
self.foes = message["foes"]
self.usersUpdated.emit(self.players.keys())
if "autojoin" in message:
self.autoJoin.emit(message["autojoin"])
if "power" in message:
self.power = message["power"]
self.manage_power()
if "channels" in message:
self.channelsUpdated.emit(message["channels"])
def handle_player_info(self, message):
name = message["login"]
self.players[name] = message
self.usersUpdated.emit([name])
def handle_mod_manager(self, message):
import functools
action = message["action"]
if action == "list" :
mods = message["mods"]
modMenu = self.menu.addMenu("Featured Mods Manager")
for mod in mods :
action = QtGui.QAction(mod, modMenu)
action.triggered.connect(functools.partial(self.featuredMod, mod))
modMenu.addAction(action)
def handle_mod_manager_info(self, message):
self.featuredModManagerInfo.emit(message)
def avatarManager(self):
self.requestAvatars(0)
self.avatarSelection.show()
def featuredMod(self, action):
self.featuredModManager.emit(action)
def handle_notice(self, message):
if "text" in message:
if message["style"] == "error" :
if self.state != ClientState.NONE :
QtGui.QMessageBox.critical(self, "Error from Server", message["text"])
else :
QtGui.QMessageBox.critical(self, "Login Failed", message["text"])
self.state = ClientState.REJECTED
elif message["style"] == "warning":
QtGui.QMessageBox.warning(self, "Warning from Server", message["text"])
elif message["style"] == "scores":
self.tray.showMessage("Scores", message["text"], QtGui.QSystemTrayIcon.Information, 3500)
self.localBroadcast.emit("Scores", message["text"])
else:
QtGui.QMessageBox.information(self, "Notice from Server", message["text"])
if message["style"] == "kill":
logger.info("Server has killed your Forged Alliance Process.")
fa.exe.kill()
if message["style"] == "kick":
logger.info("Server has kicked you from the Lobby.")
self.cleanup()
|
IDragonfire/modular-client
|
src/client/_clientwindow.py
|
Python
|
gpl-3.0
| 80,212
|
[
"VisIt"
] |
136abe5d136caa6fa95b942a758ce92f51a5e516899e2d93bd6f895c6d0dca17
|
import numpy as np
# -- ANN Ordering -------------------------------------------------------- -- #
def getNodeOrder(nodeG,connG):
"""Builds connection matrix from genome through topological sorting.
Args:
nodeG - (np_array) - node genes
[3 X nUniqueGenes]
[0,:] == Node Id
             [1,:] == Type (1=input, 2=output, 3=hidden, 4=bias)
[2,:] == Activation function (as int)
connG - (np_array) - connection genes
[5 X nUniqueGenes]
[0,:] == Innovation Number (unique Id)
[1,:] == Source Node Id
[2,:] == Destination Node Id
[3,:] == Weight Value
[4,:] == Enabled?
Returns:
Q - [int] - sorted node order as indices
wMat - (np_array) - ordered weight matrix
[N X N]
OR
False, False - if cycle is found
Todo:
    * setdiff1d is slow; since all numbers are positive ints, is there a
      better way to do this with indexing tricks (as in quickINTersect)?
"""
conn = np.copy(connG)
node = np.copy(nodeG)
nIns = len(node[0,node[1,:] == 1]) + len(node[0,node[1,:] == 4])
nOuts = len(node[0,node[1,:] == 2])
# Create connection and initial weight matrices
conn[3,conn[4,:]==0] = np.nan # disabled but still connected
src = conn[1,:].astype(int)
dest = conn[2,:].astype(int)
lookup = node[0,:].astype(int)
for i in range(len(lookup)): # Can we vectorize this?
src[np.where(src==lookup[i])] = i
dest[np.where(dest==lookup[i])] = i
wMat = np.zeros((np.shape(node)[1],np.shape(node)[1]))
wMat[src,dest] = conn[3,:]
connMat = wMat[nIns+nOuts:,nIns+nOuts:]
connMat[connMat!=0] = 1
# Topological Sort of Hidden Nodes
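  # This is Kahn's algorithm: start from hidden nodes with zero in-degree,
  # subtract their outgoing edges from the in-degree counts, and append any
  # nodes whose in-degree drops to zero. If the queue runs dry before all
  # edges are consumed, the graph contains a cycle and (False, False) is
  # returned.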
edge_in = np.sum(connMat,axis=0)
Q = np.where(edge_in==0)[0] # Start with nodes with no incoming connections
for i in range(len(connMat)):
if (len(Q) == 0) or (i >= len(Q)):
Q = []
return False, False # Cycle found, can't sort
edge_out = connMat[Q[i],:]
edge_in = edge_in - edge_out # Remove nodes' conns from total
nextNodes = np.setdiff1d(np.where(edge_in==0)[0], Q)
Q = np.hstack((Q,nextNodes))
if sum(edge_in) == 0:
break
# Add In and outs back and reorder wMat according to sort
Q += nIns+nOuts
Q = np.r_[lookup[:nIns], Q, lookup[nIns:nIns+nOuts]]
wMat = wMat[np.ix_(Q,Q)]
return Q, wMat
def getLayer(wMat):
"""Get layer of each node in weight matrix
Traverse wMat by row, collecting layer of all nodes that connect to you (X).
Your layer is max(X)+1. Input and output nodes are ignored and assigned layer
0 and max(X)+1 at the end.
Args:
wMat - (np_array) - ordered weight matrix
[N X N]
Returns:
layer - [int] - layer # of each node
Todo:
    * With very large networks this might be a performance sink -- especially
      given that this happens in the serial part of the algorithm. There is
probably a more clever way to do this given the adjacency matrix.
"""
wMat[np.isnan(wMat)] = 0
wMat[wMat!=0]=1
nNode = np.shape(wMat)[0]
layer = np.zeros((nNode))
while (True): # Loop until sorting is stable
prevOrder = np.copy(layer)
for curr in range(nNode):
srcLayer=np.zeros((nNode))
for src in range(nNode):
srcLayer[src] = layer[src]*wMat[src,curr]
layer[curr] = np.max(srcLayer)+1
if all(prevOrder==layer):
break
return layer-1
# -- ANN Activation ------------------------------------------------------ -- #
def act(weights, aVec, nInput, nOutput, inPattern):
"""Returns FFANN output given a single input pattern
If the variable weights is a vector it is turned into a square weight matrix.
Allows the network to return the result of several samples at once if given a matrix instead of a vector of inputs:
Dim 0 : individual samples
Dim 1 : dimensionality of pattern (# of inputs)
Args:
weights - (np_array) - ordered weight matrix or vector
[N X N] or [N**2]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
nInput - (int) - number of input nodes
nOutput - (int) - number of output nodes
inPattern - (np_array) - input activation
[1 X nInput] or [nSamples X nInput]
Returns:
output - (np_array) - output activation
[1 X nOutput] or [nSamples X nOutput]
"""
# Turn weight vector into weight matrix
if np.ndim(weights) < 2:
nNodes = int(np.sqrt(np.shape(weights)[0]))
wMat = np.reshape(weights, (nNodes, nNodes))
else:
nNodes = np.shape(weights)[0]
wMat = weights
wMat[np.isnan(wMat)]=0
# Vectorize input
if np.ndim(inPattern) > 1:
nSamples = np.shape(inPattern)[0]
else:
nSamples = 1
# Run input pattern through ANN
nodeAct = np.zeros((nSamples,nNodes))
nodeAct[:,0] = 1 # Bias activation
nodeAct[:,1:nInput+1] = inPattern
# Propagate signal through hidden to output nodes
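  # Because wMat is topologically ordered (inputs/bias first, hidden nodes in
  # dependency order, outputs last), a single left-to-right pass over the
  # columns is enough to propagate activations through the whole network.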
  for iNode in range(nInput+1,nNodes):
rawAct = np.dot(nodeAct, wMat[:,iNode]).squeeze()
nodeAct[:,iNode] = applyAct(aVec[iNode], rawAct)
#print(nodeAct)
output = nodeAct[:,-nOutput:]
return output
def applyAct(actId, x):
"""Returns value after an activation function is applied
Lookup table to allow activations to be stored in numpy arrays
case 1 -- Linear
case 2 -- Unsigned Step Function
case 3 -- Sin
    case 4 -- Gaussian with mean 0 and sigma 1
case 5 -- Hyperbolic Tangent [tanh] (signed)
case 6 -- Sigmoid unsigned [1 / (1 + exp(-x))]
case 7 -- Inverse
case 8 -- Absolute Value
case 9 -- Relu
case 10 -- Cosine
case 11 -- Squared
Args:
actId - (int) - key to look up table
x - (???) - value to be input into activation
[? X ?] - any type or dimensionality
Returns:
output - (float) - value after activation is applied
[? X ?] - same dimensionality as input
"""
if actId == 1: # Linear
value = x
  elif actId == 2: # Unsigned Step Function
value = 1.0*(x>0.0)
#value = (np.tanh(50*x/2.0) + 1.0)/2.0
elif actId == 3: # Sin
value = np.sin(np.pi*x)
elif actId == 4: # Gaussian with mean 0 and sigma 1
value = np.exp(-np.multiply(x, x) / 2.0)
elif actId == 5: # Hyperbolic Tangent (signed)
value = np.tanh(x)
elif actId == 6: # Sigmoid (unsigned)
value = (np.tanh(x/2.0) + 1.0)/2.0
elif actId == 7: # Inverse
value = -x
elif actId == 8: # Absolute Value
value = abs(x)
elif actId == 9: # Relu
value = np.maximum(0, x)
elif actId == 10: # Cosine
value = np.cos(np.pi*x)
elif actId == 11: # Squared
value = x**2
else:
value = x
return value
# -- Action Selection ---------------------------------------------------- -- #
def selectAct(action, actSelect):
"""Selects action based on vector of actions
Single Action:
- Hard: a single action is chosen based on the highest index
    - Prob: a single action is chosen probabilistically, with higher values
            more likely to be chosen
We aren't selecting a single action:
- Softmax: a softmax normalized distribution of values is returned
- Default: all actions are returned
Args:
action - (np_array) - vector weighting each possible action
[N X 1]
Returns:
i - (int) or (np_array) - chosen index
[N X 1]
"""
if actSelect == 'softmax':
action = softmax(action)
elif actSelect == 'prob':
action = weightedRandom(np.sum(action,axis=0))
else:
action = action.flatten()
return action
def softmax(x):
"""Compute softmax values for each sets of scores in x.
Assumes: [samples x dims]
Args:
x - (np_array) - unnormalized values
[samples x dims]
Returns:
softmax - (np_array) - softmax normalized in dim 1
Todo: Untangle all the transposes...
"""
if x.ndim == 1:
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
else:
e_x = np.exp(x.T - np.max(x,axis=1))
return (e_x / e_x.sum(axis=0)).T
def weightedRandom(weights):
"""Returns random index, with each choices chance weighted
Args:
weights - (np_array) - weighting of each choice
[N X 1]
Returns:
i - (int) - chosen index
"""
minVal = np.min(weights)
weights = weights - minVal # handle negative vals
cumVal = np.cumsum(weights)
pick = np.random.uniform(0, cumVal[-1])
for i in range(len(weights)):
if cumVal[i] >= pick:
return i
# -- File I/O ------------------------------------------------------------ -- #
def exportNet(filename,wMat, aVec):
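  # Serializes one network as CSV: the [N x N] weight matrix with each node's
  # activation id appended as a final column, so importNet() can split it
  # back apart via ind[:,:-1] and ind[:,-1].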
indMat = np.c_[wMat,aVec]
np.savetxt(filename, indMat, delimiter=',',fmt='%1.2e')
def importNet(fileName):
ind = np.loadtxt(fileName, delimiter=',')
wMat = ind[:,:-1] # Weight Matrix
aVec = ind[:,-1] # Activation functions
# Create weight key
wVec = wMat.flatten()
wVec[np.isnan(wVec)]=0
wKey = np.where(wVec!=0)[0]
return wVec, aVec, wKey
|
google/brain-tokyo-workshop
|
WANNRelease/prettyNeatWann/neat_src/ann.py
|
Python
|
apache-2.0
| 9,221
|
[
"Gaussian"
] |
bb0998a73f87e7bdeac4c850cbf1a0e65f93f3a63a027f7ff676b891116232d8
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr15"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
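# Note: pd.concat(..., axis=1) aligns the per-sample frames on their shared
# "position" index (an outer join), so any site missing from a sample shows
# up as NaN in that sample's column.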
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
print(total_matrix.shape)
# Encode each methylation call as an integer; missing values become "?".
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else "?")
# Join each sample's column of calls into one character string per sample.
total_matrix = total_matrix.astype(str).apply(''.join)
# Prefix each string with its sample name, separated by a space (PHYLIP-like rows).
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str), ' '))
tott.to_csv("normal_chrom15.phy", header=None, index=None)
print(tott.shape)
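
# Illustrative sketch (not part of the original pipeline): the same
# encode-join-label steps as above on a tiny made-up matrix, so the
# conversion to the PHYLIP-like rows is easy to follow. The sample names
# ("cellA", "cellB") and values are hypothetical. Wrapped in a function so
# it does not run as part of this script.
def _demo_phylip_encoding():
    toy = pd.DataFrame({"cellA": [1.0, None, 0.0], "cellB": [0.0, 1.0, None]})
    toy = toy.applymap(lambda x: int(x) if pd.notnull(x) else "?")
    toy = toy.astype(str).apply(''.join)  # one string per sample: "1?0", "01?"
    # -> "cellA 1?0" and "cellB 01?"
    print(pd.Series(toy.index.astype(str).str.cat(toy.astype(str), ' ')))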
|
evanbiederstedt/RRBSfun
|
trees/chrom_scripts/normal_chr15.py
|
Python
|
mit
| 25,844
|
[
"MCell"
] |
9ae7cfa7ac0181a9fc0f43caa025b8ff0fc5a63253953cd297e62919f5c5858c
|
"""
Signal
======
The signal module contains all kinds of signal processing related functions.
.. inheritance-diagram:: acoustics.signal
Filtering
*********
.. autoclass:: Filterbank
.. autofunction:: bandpass_filter
.. autofunction:: octave_filter
.. autofunction:: bandpass
.. autofunction:: lowpass
.. autofunction:: highpass
.. autofunction:: octavepass
.. autofunction:: convolve
Windowing
*********
.. autofunction:: window_scaling_factor
.. autofunction:: apply_window
Spectra
*******
Different types of spectra exist.
.. autofunction:: amplitude_spectrum
.. autofunction:: auto_spectrum
.. autofunction:: power_spectrum
.. autofunction:: density_spectrum
.. autofunction:: angle_spectrum
.. autofunction:: phase_spectrum
Frequency bands
***************
.. autoclass:: Band
.. autoclass:: Frequencies
.. autoclass:: EqualBand
.. autoclass:: OctaveBand
.. autofunction:: integrate_bands
.. autofunction:: octaves
.. autofunction:: third_octaves
Hilbert transform
*****************
.. autofunction:: amplitude_envelope
.. autofunction:: instantaneous_phase
.. autofunction:: instantaneous_frequency
Conversion
**********
.. autofunction:: decibel_to_neper
.. autofunction:: neper_to_decibel
Other
*****
.. autofunction:: isolate
.. autofunction:: zero_crossings
.. autofunction:: rms
.. autofunction:: ms
.. autofunction:: normalize
.. autofunction:: ir2fr
.. autofunction:: wvd
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import spdiags
from scipy.signal import butter, lfilter, freqz, filtfilt, sosfilt, sosfreqz
import acoustics.octave
#from acoustics.octave import REFERENCE
import acoustics.bands
from scipy.signal import hilbert
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
from acoustics.standards.iec_61672_1_2013 import (NOMINAL_OCTAVE_CENTER_FREQUENCIES,
NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES)
try:
from pyfftw.interfaces.numpy_fft import rfft
except ImportError:
from numpy.fft import rfft
def bandpass_filter(lowcut, highcut, fs, order=8, output='sos'):
"""Band-pass filter.
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.
:returns: Returned value depends on `output`.
A Butterworth filter is used.
.. seealso:: :func:`scipy.signal.butter`.
"""
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
    output = butter(order//2, [low, high], btype='band', output=output)  # Integer order: a band-pass of order N uses an order N//2 low-pass prototype.
return output
def bandpass(signal, lowcut, highcut, fs, order=8, zero_phase=False):
"""Filter signal with band-pass filter.
:param signal: Signal
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`bandpass_filter` for the filter that is used.
"""
sos = bandpass_filter(lowcut, highcut, fs, order, output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
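# Hedged usage sketch of :func:`bandpass` (illustrative, not from the library):
# band-pass white noise to 500--2000 Hz. For white noise the retained fraction
# of the power is roughly the fraction of the Nyquist range the band covers,
# here 1500/4000 = 0.375.
def _demo_bandpass():
    fs = 8000
    noise = np.random.randn(4 * fs)
    filtered = bandpass(noise, 500.0, 2000.0, fs, order=8)
    print((filtered**2).mean() / (noise**2).mean())  # ~0.37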
def bandstop(signal, lowcut, highcut, fs, order=8, zero_phase=False):
"""Filter signal with band-stop filter.
:param signal: Signal
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
"""
return lowpass(signal, lowcut, fs, order=(order//2), zero_phase=zero_phase) + highpass(signal, highcut, fs, order=(order//2), zero_phase=zero_phase)
def lowpass(signal, cutoff, fs, order=4, zero_phase=False):
"""Filter signal with low-pass filter.
:param signal: Signal
:param fs: Sample frequency
:param cutoff: Cut-off frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`scipy.signal.butter`.
"""
sos = butter(order, cutoff/(fs/2.0), btype='low', output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def highpass(signal, cutoff, fs, order=4, zero_phase=False):
"""Filter signal with low-pass filter.
:param signal: Signal
:param fs: Sample frequency
:param cutoff: Cut-off frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`scipy.signal.butter`.
"""
sos = butter(order, cutoff/(fs/2.0), btype='high', output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def octave_filter(center, fs, fraction, order=8, output='sos'):
"""Fractional-octave band-pass filter.
    :param center: Center frequency of fractional-octave band.
:param fs: Sample frequency
:param fraction: Fraction of fractional-octave band.
:param order: Filter order
:param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.
A Butterworth filter is used.
.. seealso:: :func:`bandpass_filter`
"""
ob = OctaveBand(center=center, fraction=fraction)
return bandpass_filter(ob.lower[0], ob.upper[0], fs, order, output=output)
def octavepass(signal, center, fs, fraction, order=8, zero_phase=True):
"""Filter signal with fractional-octave bandpass filter.
:param signal: Signal
    :param center: Center frequency of fractional-octave band.
:param fs: Sample frequency
:param fraction: Fraction of fractional-octave band.
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`octave_filter`
"""
sos = octave_filter(center, fs, fraction, order)
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def convolve(signal, ltv, mode='full'):
"""
Perform convolution of signal with linear time-variant system ``ltv``.
:param signal: Vector representing input signal :math:`u`.
:param ltv: 2D array where each column represents an impulse response
:param mode: 'full', 'valid', or 'same'. See :func:`np.convolve` for an explanation of the options.
The convolution of two sequences is given by
.. math:: \mathbf{y} = \mathbf{t} \\star \mathbf{u}
This can be written as a matrix-vector multiplication
.. math:: \mathbf{y} = \mathbf{T} \\cdot \mathbf{u}
where :math:`T` is a Toeplitz matrix in which each column represents an impulse response.
In the case of a linear time-invariant (LTI) system, each column represents a time-shifted copy of the first column.
    In the time-variant case (LTV), every column can contain a unique impulse response, both in values and in size.
This function assumes all impulse responses are of the same size.
The input matrix ``ltv`` thus represents the non-shifted version of the Toeplitz matrix.
.. seealso:: :func:`np.convolve`, :func:`scipy.signal.convolve` and :func:`scipy.signal.fftconvolve` for convolution with LTI system.
"""
assert(len(signal) == ltv.shape[1])
n = ltv.shape[0] + len(signal) - 1 # Length of output vector
un = np.concatenate((signal, np.zeros(ltv.shape[0] - 1))) # Resize input vector
offsets = np.arange(0, -ltv.shape[0], -1) # Offsets for impulse responses
Cs = spdiags(ltv, offsets, n, n) # Sparse representation of IR's.
out = Cs.dot(un) # Calculate dot product.
if mode=='full':
return out
elif mode=='same':
        start = ltv.shape[0]//2 - 1 + ltv.shape[0]%2  # Integer indices are required for slicing.
        stop = len(signal) + ltv.shape[0]//2 - 1 + ltv.shape[0]%2
return out[start:stop]
    elif mode=='valid':
        start = ltv.shape[0] - 1
        stop = len(signal)
        return out[start:stop]
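# Minimal illustrative sketch of the LTV convolution above: three input
# samples, each with its own length-2 impulse response (the columns of
# ``ltv``). All values are made up.
def _demo_convolve_ltv():
    u = np.array([1.0, 1.0, 1.0])
    ltv = np.array([[1.0, 2.0, 3.0],   # first tap of each impulse response
                    [1.0, 2.0, 3.0]])  # second tap of each impulse response
    print(convolve(u, ltv, mode='full'))  # -> [1. 3. 5. 3.]
    # With identical columns this reduces to ordinary LTI convolution,
    # i.e. np.convolve(u, ltv[:, 0]).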
def ir2fr(ir, fs, N=None):
"""
Convert impulse response into frequency response. Returns single-sided RMS spectrum.
    :param ir: Impulse response
    :param fs: Sample frequency
    :param N: Amount of FFT bins.
Calculates the positive frequencies using :func:`np.fft.rfft`.
Corrections are then applied to obtain the single-sided spectrum.
    .. note:: Single-sided spectrum. Therefore, the amount of bins returned is N//2+1.
"""
#ir = ir - np.mean(ir) # Remove DC component.
N = N if N else ir.shape[-1]
fr = rfft(ir, n=N) / N
f = np.fft.rfftfreq(N, 1.0/fs) #/ 2.0
fr *= 2.0
fr[..., 0] /= 2.0 # DC component should not be doubled.
    if not N%2: # if N is even
        fr[..., -1] /= 2.0 # And neither should the Nyquist bin at fs/2.
#f = np.arange(0, N/2+1)*(fs/N)
return f, fr
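# Hedged check of the single-sided scaling of :func:`ir2fr` (illustrative):
# a unit-amplitude cosine that fits the block exactly shows up with
# magnitude 1.0 in its bin. The input is treated as an "impulse response"
# purely to exercise the scaling.
def _demo_ir2fr():
    fs = N = 64
    k = 5  # bin index and, with fs == N, also the frequency in hertz
    x = np.cos(2.0 * np.pi * k * np.arange(N) / N)
    f, fr = ir2fr(x, fs)
    print(f[k], np.abs(fr[k]))  # -> 5.0 1.0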
def decibel_to_neper(decibel):
"""
Convert decibel to neper.
:param decibel: Value in decibel (dB).
:returns: Value in neper (Np).
The conversion is done according to
.. math :: \\mathrm{dB} = \\frac{\\log{10}}{20} \\mathrm{Np}
"""
return np.log(10.0) / 20.0 * decibel
def neper_to_decibel(neper):
"""
Convert neper to decibel.
:param neper: Value in neper (Np).
:returns: Value in decibel (dB).
The conversion is done according to
.. math :: \\mathrm{Np} = \\frac{20}{\\log{10}} \\mathrm{dB}
"""
return 20.0 / np.log(10.0) * neper
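# Quick illustrative round trip for the two conversions above:
# 1 Np corresponds to 20 / ln(10) = 8.686 dB.
def _demo_neper_decibel():
    print(neper_to_decibel(1.0))                    # ~8.6859
    print(decibel_to_neper(neper_to_decibel(3.0)))  # ~3.0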
class Frequencies(object):
"""
Object describing frequency bands.
"""
def __init__(self, center, lower, upper, bandwidth=None):
self.center = np.asarray(center)
"""
Center frequencies.
"""
self.lower = np.asarray(lower)
"""
Lower frequencies.
"""
self.upper = np.asarray(upper)
"""
Upper frequencies.
"""
self.bandwidth = np.asarray(bandwidth) if bandwidth is not None else np.asarray(self.upper) - np.asarray(self.lower)
"""
Bandwidth.
"""
def __iter__(self):
for i in range(len(self.center)):
yield self[i]
def __len__(self):
return len(self.center)
def __str__(self):
return str(self.center)
def __repr__(self):
return "Frequencies({})".format(str(self.center))
def angular(self):
"""Angular center frequency in radians per second.
"""
return 2.0 * np.pi * self.center
class EqualBand(Frequencies):
"""
Equal bandwidth spectrum. Generally used for narrowband data.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, bandwidth=None):
"""
:param center: Vector of center frequencies.
:param fstart: First center frequency.
:param fstop: Last center frequency.
:param nbands: Amount of frequency bands.
:param bandwidth: Bandwidth of bands.
"""
if center is not None:
try:
nbands = len(center)
except TypeError:
center = [center]
nbands = 1
u = np.unique(np.diff(center).round(decimals=3))
n = len(u)
if n == 1:
bandwidth = u
elif n > 1:
raise ValueError("Given center frequencies are not equally spaced.")
else:
pass
fstart = center[0] #- bandwidth/2.0
fstop = center[-1] #+ bandwidth/2.0
elif fstart is not None and fstop is not None and nbands:
bandwidth = (fstop - fstart) / (nbands-1)
elif fstart is not None and fstop is not None and bandwidth:
nbands = round((fstop - fstart) / bandwidth) + 1
elif fstart is not None and bandwidth and nbands:
            fstop = fstart + (nbands-1) * bandwidth
elif fstop is not None and bandwidth and nbands:
fstart = fstop - (nbands-1) * bandwidth
else:
raise ValueError("Insufficient parameters. Cannot determine fstart, fstop, bandwidth.")
center = fstart + np.arange(0, nbands) * bandwidth # + bandwidth/2.0
upper = fstart + np.arange(0, nbands) * bandwidth + bandwidth/2.0
lower = fstart + np.arange(0, nbands) * bandwidth - bandwidth/2.0
super(EqualBand, self).__init__(center, lower, upper, bandwidth)
def __getitem__(self, key):
return type(self)(center=self.center[key], bandwidth=self.bandwidth)
def __repr__(self):
return "EqualBand({})".format(str(self.center))
class OctaveBand(Frequencies):
"""Fractional-octave band spectrum.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, fraction=1, reference=acoustics.octave.REFERENCE):
if center is not None:
try:
nbands = len(center)
except TypeError:
center = [center]
center = np.asarray(center)
indices = acoustics.octave.index_of_frequency(center, fraction=fraction, ref=reference)
elif fstart is not None and fstop is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstop+1)
elif fstart is not None and nbands is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstart+nbands)
elif fstop is not None and nbands is not None:
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
indices = np.arange(nstop-nbands, nstop)
else:
raise ValueError("Insufficient parameters. Cannot determine fstart and/or fstop.")
center = acoustics.octave.exact_center_frequency(None, fraction=fraction, n=indices, ref=reference)
lower = acoustics.octave.lower_frequency(center, fraction=fraction)
upper = acoustics.octave.upper_frequency(center, fraction=fraction)
bandwidth = upper - lower
nominal = acoustics.octave.nominal_center_frequency(None, fraction, indices)
super(OctaveBand, self).__init__(center, lower, upper, bandwidth)
self.fraction = fraction
"""Fraction of fractional-octave filter.
"""
self.reference = reference
"""Reference center frequency.
"""
self.nominal = nominal
"""Nominal center frequencies.
"""
def __getitem__(self, key):
return type(self)(center=self.center[key], fraction=self.fraction, reference=self.reference)
def __repr__(self):
return "OctaveBand({})".format(str(self.center))
def ms(x):
"""Mean value of signal `x` squared.
:param x: Dynamic quantity.
:returns: Mean squared of `x`.
"""
return (np.abs(x)**2.0).mean()
def rms(x):
"""Root mean squared of signal `x`.
:param x: Dynamic quantity.
.. math:: x_{rms} = lim_{T \\to \\infty} \\sqrt{\\frac{1}{T} \int_0^T |f(x)|^2 \\mathrm{d} t }
:seealso: :func:`ms`.
"""
return np.sqrt(ms(x))
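# Illustrative check of :func:`rms`: for a sinusoid of amplitude A the
# root-mean-square value tends to A / sqrt(2).
def _demo_rms():
    t = np.linspace(0.0, 1.0, 10000, endpoint=False)
    x = 3.0 * np.sin(2.0 * np.pi * 50.0 * t)
    print(rms(x), 3.0 / np.sqrt(2.0))  # both ~2.121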
def normalize(y, x=None):
"""normalize power in y to a (standard normal) white noise signal.
Optionally normalize to power in signal `x`.
#The mean power of a Gaussian with :math:`\\mu=0` and :math:`\\sigma=1` is 1.
"""
#return y * np.sqrt( (np.abs(x)**2.0).mean() / (np.abs(y)**2.0).mean() )
if x is not None:
x = ms(x)
else:
x = 1.0
return y * np.sqrt( x / ms(y) )
#return y * np.sqrt( 1.0 / (np.abs(y)**2.0).mean() )
## Broken? Caused correlation in auralizations....weird!
def window_scaling_factor(window, axis=-1):
"""
Calculate window scaling factor.
:param window: Window.
When analysing broadband (filtered noise) signals it is common to normalize
the windowed signal so that it has the same power as the un-windowed one.
.. math:: S = \\sqrt{\\frac{\\sum_{i=0}^N w_i^2}{N}}
"""
return np.sqrt((window*window).mean(axis=axis))
def apply_window(x, window):
"""
Apply window to signal.
:param x: Instantaneous signal :math:`x(t)`.
:param window: Vector representing window.
:returns: Signal with window applied to it.
    .. math:: x_s(t) = x(t) w(t) / S
where :math:`S` is the window scaling factor.
.. seealso:: :func:`window_scaling_factor`.
"""
s = window_scaling_factor(window) # Determine window scaling factor.
n = len(window)
    windows = len(x) // n # Amount of whole windows that fit in the signal.
    x = x[0:windows*n] # Truncate final part of signal that does not fit.
#x = x.reshape(-1, len(window)) # Reshape so we can apply window.
y = np.tile(window, windows)
return x * y / s
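# Hedged sketch of :func:`apply_window` (illustrative): thanks to the
# scaling factor, windowed white noise keeps roughly the mean-square
# value of the unwindowed signal.
def _demo_apply_window():
    x = np.random.randn(8192)
    w = np.hanning(256)  # 32 whole windows fit in the signal
    y = apply_window(x, w)
    print(ms(x), ms(y))  # comparable values, ~1.0 each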
def amplitude_spectrum(x, fs, N=None):
"""
Amplitude spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The amplitude spectrum gives the amplitudes of the sinusoidal the signal is built
up from, and the RMS (root-mean-square) amplitudes can easily be found by dividing
these amplitudes with :math:`\\sqrt{2}`.
The amplitude spectrum is double-sided.
"""
N = N if N else x.shape[-1]
fr = np.fft.fft(x, n=N) / N
f = np.fft.fftfreq(N, 1.0/fs)
return np.fft.fftshift(f), np.fft.fftshift(fr, axes=[-1])
def auto_spectrum(x, fs, N=None):
"""
Auto-spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The auto-spectrum contains the squared amplitudes of the signal. Squared amplitudes
are used when presenting data as it is a measure of the power/energy in the signal.
.. math:: S_{xx} (f_n) = \\overline{X (f_n)} \\cdot X (f_n)
The auto-spectrum is double-sided.
"""
f, a = amplitude_spectrum(x, fs, N=N)
return f, (a*a.conj()).real
def power_spectrum(x, fs, N=None):
"""
Power spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The power spectrum, or single-sided autospectrum, contains the squared RMS amplitudes of the signal.
A power spectrum is a spectrum with squared RMS values. The power spectrum is
calculated from the autospectrum of the signal.
.. warning:: Does not include scaling to reference value!
.. seealso:: :func:`auto_spectrum`
"""
N = N if N else x.shape[-1]
f, a = auto_spectrum(x, fs, N=N)
a = a[..., N//2:]
f = f[..., N//2:]
a *= 2.0
a[..., 0] /= 2.0 # DC component should not be doubled.
    if not N%2: # if N is even
        a[..., -1] /= 2.0 # And neither should the Nyquist bin at fs/2.
return f, a
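# Illustrative check of :func:`power_spectrum`: a unit-amplitude sine
# concentrates its mean-square value A**2 / 2 = 0.5 in a single bin.
def _demo_power_spectrum():
    fs = N = 1024
    t = np.arange(N) / fs
    x = np.sin(2.0 * np.pi * 100.0 * t)
    f, p = power_spectrum(x, fs)
    print(f[np.argmax(p)], p.max())  # -> 100.0 0.5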
def angle_spectrum(x, fs, N=None):
"""
Phase angle spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided wrapped phase angle spectrum.
.. seealso:: :func:`phase_spectrum` for unwrapped phase spectrum.
"""
N = N if N else x.shape[-1]
f, a = amplitude_spectrum(x, fs, N)
a = np.angle(a)
a = a[..., N//2:]
f = f[..., N//2:]
return f, a
def phase_spectrum(x, fs, N=None):
"""
Phase spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided unwrapped phase spectrum.
.. seealso:: :func:`angle_spectrum` for wrapped phase angle.
"""
    f, a = angle_spectrum(x, fs, N=N)
return f, np.unwrap(a)
#def power_and_phase_spectrum(x, fs, N=None):
#"""
#Power spectrum and phase of instantaneous signal :math:`x(t)`.
#:param x: Instantaneous signal :math:`x(t)`.
#:param fs: Sample frequency :math:`f_s`.
#:param N: Amount of FFT bins.
#Often one is interested in both the power spectrum and the phase. This function returns the power and a single-sided phase spectrum.
#For an explanation of the power spectrum, see :func:`power_spectrum`.
#"""
#returns f, power, phase
def density_spectrum(x, fs, N=None):
"""
Density spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
A density spectrum considers the amplitudes per unit frequency.
    Density spectra are used to compare spectra with different frequency resolutions, since
    the magnitudes are expressed per hertz and therefore do not depend on the resolution.
    Amplitude spectra, on the other hand, depend on the chosen frequency resolution.
"""
N = N if N else x.shape[-1]
fr = np.fft.fft(x, n=N) / fs
f = np.fft.fftfreq(N, 1.0/fs)
return np.fft.fftshift(f), np.fft.fftshift(fr)
#def auto_density_spectrum(x, fs, N=None):
#"""
#Auto density spectrum of instantaneous signal :math:`x(t)`.
#"""
#f, d = density_spectrum(x, fs, N=N)
#return f, (d*d.conj()).real
#def power_density_spectrum(x, fs, N=None):
#"""
#Power density spectrum.
#"""
#N = N if N else x.shape[-1]
#f, a = auto_density_spectrum(x, fs, N=N)
#a = a[N//2:]
#f = f[N//2:]
#a *= 2.0
#a[..., 0] /= 2.0 # DC component should not be doubled.
#if not N%2: # if not uneven
#a[..., -1] /= 2.0 # And neither should fs/2 be.
#return f, a
def integrate_bands(data, a, b):
"""
Reduce frequency resolution of power spectrum. Merges frequency bands by integration.
:param data: Vector with narrowband powers.
:param a: Instance of :class:`Frequencies`.
:param b: Instance of :class:`Frequencies`.
.. note:: Needs rewriting so that the summation goes over axis=1.
"""
try:
if b.fraction%a.fraction:
            raise NotImplementedError("Non-integer ratios of fractional-octaves are not supported.")
except AttributeError:
pass
lower, _ = np.meshgrid(b.lower, a.center)
upper, _ = np.meshgrid(b.upper, a.center)
_, center= np.meshgrid(b.center, a.center)
return ((lower < center) * (center <= upper) * data[...,None]).sum(axis=-2)
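# Illustrative sketch of :func:`integrate_bands`: sum unit narrowband
# powers (50 Hz resolution) into 1/1-octave bands. Wider octave bands
# collect proportionally more narrowband bins.
def _demo_integrate_bands():
    nb = EqualBand(fstart=0.0, fstop=22050.0, bandwidth=50.0)
    ob = OctaveBand(fstart=125.0, fstop=4000.0, fraction=1)
    power = np.ones(len(nb))
    print(integrate_bands(power, nb, ob))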
def bandpass_frequencies(x, fs, frequencies, order=8, purge=False, zero_phase=False):
""""Apply bandpass filters for frequencies
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies. Instance of :class:`Frequencies`.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
"""
if purge:
frequencies = frequencies[frequencies.upper < fs/2.0]
return frequencies, np.array([bandpass(x, band.lower, band.upper, fs, order, zero_phase=zero_phase) for band in frequencies])
def bandpass_octaves(x, fs, frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES, order=8, purge=False, zero_phase=False):
"""Apply 1/1-octave bandpass filters.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. seealso:: :func:`octavepass`
"""
return bandpass_fractional_octaves(x, fs, frequencies, fraction=1, order=order, purge=purge, zero_phase=zero_phase)
def bandpass_third_octaves(x, fs, frequencies=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES, order=8, purge=False, zero_phase=False):
"""Apply 1/3-octave bandpass filters.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. seealso:: :func:`octavepass`
"""
return bandpass_fractional_octaves(x, fs, frequencies, fraction=3, order=order, purge=purge, zero_phase=zero_phase)
def bandpass_fractional_octaves(x, fs, frequencies, fraction=None, order=8, purge=False, zero_phase=False):
"""Apply 1/N-octave bandpass filters.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency.
:param frequencies: Frequencies. Either instance of :class:`OctaveBand`, or array along with fs.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. seealso:: :func:`octavepass`
"""
if not isinstance(frequencies, Frequencies):
frequencies = OctaveBand(center=frequencies, fraction=fraction)
return bandpass_frequencies(x, fs, frequencies, order=order, purge=purge, zero_phase=zero_phase)
def third_octaves(p, fs, density=False,
frequencies=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES,
ref=REFERENCE_PRESSURE):
"""Calculate level per 1/3-octave in frequency domain using the FFT.
    :param p: Instantaneous signal :math:`p(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. seealso:: :attr:`acoustics.bands.THIRD_OCTAVE_CENTER_FREQUENCIES`
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(center=frequencies, fraction=3)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power / ref**2.0)
return fob, level
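# Hedged usage sketch of :func:`third_octaves` (illustrative): band levels
# of one second of unit-variance white noise re the default reference
# pressure. Levels rise with frequency because the bands get wider.
def _demo_third_octaves():
    fs = 44100
    p = np.random.randn(fs)
    fob, levels = third_octaves(p, fs)
    print(fob.nominal[0], levels[0])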
def octaves(p, fs, density=False,
frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES,
ref=REFERENCE_PRESSURE):
"""Calculate level per 1/1-octave in frequency domain using the FFT.
    :param p: Instantaneous signal :math:`p(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:param frequencies: Frequencies.
:param ref: Reference value.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. seealso:: :attr:`acoustics.bands.OCTAVE_CENTER_FREQUENCIES`
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(center=frequencies, fraction=1)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power / ref**2.0)
return fob, level
def fractional_octaves(p, fs, start=5.0, stop=16000.0, fraction=3, density=False):
"""Calculate level per 1/N-octave in frequency domain using the FFT. N is `fraction`.
    :param p: Instantaneous signal :math:`p(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. note:: This function does *not* use nominal center frequencies.
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(fstart=start, fstop=stop, fraction=fraction)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power)
return fob, level
class Filterbank(object):
"""
Fractional-Octave filter bank.
    .. warning:: For high sample frequencies the filter coefficients of the low-frequency bands become inaccurate. Therefore, to improve the response for lower frequencies the signal should be downsampled first. Currently, there is no easy way to do so within the Filterbank.
"""
def __init__(self, frequencies, sample_frequency=44100, order=8):
self.frequencies = frequencies
"""
Frequencies object.
See also :class:`Frequencies` and subclasses.
.. note:: A frequencies object should have the attributes center, lower and upper.
"""
self.order = order
"""
Filter order of Butterworth filter.
"""
self.sample_frequency = sample_frequency
"""
Sample frequency.
"""
@property
def sample_frequency(self):
"""
Sample frequency.
"""
return self._sample_frequency
@sample_frequency.setter
def sample_frequency(self, x):
#if x <= self.center_frequencies.max():
#raise ValueError("Sample frequency cannot be lower than the highest center frequency.")
self._sample_frequency = x
@property
def filters(self):
"""
Filters this filterbank consists of.
"""
fs = self.sample_frequency
return ( bandpass_filter(lower, upper, fs, order=self.order, output='sos') for lower, upper in zip(self.frequencies.lower, self.frequencies.upper) )
#order = self.order
#filters = list()
#nyq = self.sample_frequency / 2.0
#return ( butter(order, [lower/nyq, upper/nyq], btype='band', analog=False) for lower, upper in zip(self.frequencies.lower, self.frequencies.upper) )
def lfilter(self, signal):
"""
Filter signal with filterbank.
        .. note:: This function uses :func:`scipy.signal.sosfilt`.
"""
return ( sosfilt(sos, signal) for sos in self.filters )
def filtfilt(self, signal):
"""
Filter signal with filterbank.
Returns a list consisting of a filtered signal per filter.
        .. note:: This function uses a second-order-sections version of :func:`scipy.signal.filtfilt` and therefore has a zero-phase response.
"""
return ( _sosfiltfilt(sos, signal) for sos in self.filters )
def power(self, signal):
"""
Power per band in signal.
"""
filtered = self.filtfilt(signal)
return np.array([(x**2.0).sum()/len(x) / bw for x, bw in zip(filtered, self.frequencies.bandwidth)])
def plot_response(self):
"""
Plot frequency response.
        .. note:: The shown phase response is obtained when :meth:`lfilter` is used. The method :meth:`filtfilt` results in a zero-phase response.
"""
fs = self.sample_frequency
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
        for sos, fc in zip(self.filters, self.frequencies.center):
            # The filters are second-order sections, so use sosfreqz
            # (available in scipy >= 0.19) instead of freqz on (b, a) pairs.
            w, h = sosfreqz(sos, int(fs/2))
            ax1.semilogx(w / (2.0*np.pi) * fs, 20.0 * np.log10(np.abs(h)), label=str(int(fc)))
            ax2.semilogx(w / (2.0*np.pi) * fs, np.angle(h), label=str(int(fc)))
ax1.set_xlabel(r'$f$ in Hz')
ax1.set_ylabel(r'$|H|$ in dB re. 1')
ax2.set_xlabel(r'$f$ in Hz')
ax2.set_ylabel(r'$\angle H$ in rad')
ax1.legend(loc=5)
ax2.legend(loc=5)
ax1.set_ylim(-60.0, +10.0)
return fig
def plot_power(self, signal):
"""
Plot power in signal.
"""
f = self.frequencies.center
p = self.power(signal)
fig = plt.figure()
ax = fig.add_subplot(111)
        p = ax.bar(f, 10.0*np.log10(p)) # Power quantities convert to level with 10 log10.
ax.set_xlabel('$f$ in Hz')
ax.set_ylabel('$L$ in dB re. 1')
ax.set_xscale('log')
return fig
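# Illustrative usage sketch of :class:`Filterbank`: octave filters applied
# to white noise. Since ``power`` divides by bandwidth, the per-band values
# are roughly flat for white noise.
def _demo_filterbank():
    fb = Filterbank(OctaveBand(fstart=125.0, fstop=4000.0, fraction=1),
                    sample_frequency=44100)
    x = np.random.randn(44100)
    print(fb.power(x))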
#class FilterbankFFT(object):
#"""
#Filterbank to filter signal using FFT.
#"""
#def __init__(self, frequencies, sample_frequency=44100):
#self.frequencies = frequencies
#"""
#Frequencies.
#See also :class:`Frequencies` and subclasses.
#"""
#self.sample_frequency = sample_frequency
#def power(self, signal):
#pass
#def plot_power(self, signal):
#pass
def isolate(signals):
"""Isolate signals.
:param signals: Array of shape N x M where N is the amount of samples and M the amount of signals. Thus, each column is a signal.
:returns: Array of isolated signals. Each column is a signal.
Isolate signals using Singular Value Decomposition.
"""
x = np.asarray(signals)
W, s, v = np.linalg.svd( (np.tile( (x*x).sum(axis=0), (len(x), 1) ) * x).dot(x.T) )
return v.T
def zero_crossings(data):
"""
Determine the positions of zero crossings in `data`.
:param data: Vector
:returns: Vector with indices of samples *before* the zero crossing.
"""
pos = data > 0
npos = ~pos
return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]
def amplitude_envelope(signal, fs):
"""Instantaneous amplitude of tone.
The instantaneous amplitude is the magnitude of the analytic signal.
.. seealso:: :func:`scipy.signal.hilbert`
"""
return np.abs(hilbert(signal))
def instantaneous_phase(signal, fs):
"""Instantaneous phase of tone.
The instantaneous phase is the angle of the analytic signal.
This function returns a wrapped angle.
.. seealso:: :func:`scipy.signal.hilbert`
"""
return np.angle(hilbert(signal))
def instantaneous_frequency(signal, fs):
"""Determine instantaneous frequency of tone.
The instantaneous frequency can be obtained by differentiating the unwrapped instantaneous phase.
.. seealso:: :func:`instantaneous_phase`
"""
return np.diff( np.unwrap(instantaneous_phase(signal, fs))) / (2.0*np.pi) * fs
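# Illustrative check of :func:`instantaneous_frequency`: for a pure tone
# the instantaneous frequency recovers the tone frequency away from the
# edges (the Hilbert transform has edge effects).
def _demo_instantaneous_frequency():
    fs = 1000
    x = np.sin(2.0 * np.pi * 50.0 * np.arange(fs) / fs)
    fi = instantaneous_frequency(x, fs)
    print(fi[100:105])  # ~50.0 each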
def wvd(signal, fs, analytic=True):
"""Wigner-Ville Distribution
:param signal: Signal
:param fs: Sample frequency
:param analytic: Use the analytic signal, calculated using Hilbert transform.
.. math:: W_z(n, \\omega) = 2 \\sum_k z^*[n-k]z[n+k] e^{-j\\omega 2kT}
Includes positive and negative frequencies.
"""
signal = np.asarray(signal)
N = int(len(signal)+len(signal)%2)
length_FFT = N # Take an even value of N
#if N != len(signal):
# signal = np.concatenate(signal, [0])
length_time = len(signal)
if analytic:
signal = hilbert(signal)
s = np.concatenate((np.zeros(length_time), signal, np.zeros(length_time)))
W = np.zeros((length_FFT,length_time))
tau = np.arange(0, N//2)
    R = np.zeros((length_time, N), dtype='complex128')  # One row of lagged products per time instant; complex because the analytic signal is used.
    i = length_time
    for t in range(length_time):
        R[t, tau] = ( s[i+tau] * s[i-tau].conj() ) # In one direction
        R[t, N-(tau+1)] = R[t, tau+1].conj() # And the other direction
        i += 1
W = np.fft.fft(R, length_FFT) / (2*length_FFT)
f = np.fft.fftfreq(N, 1./fs)
return f, W.T
def _sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None):
"""Filtfilt version using Second Order sections. Code is taken from scipy.signal.filtfilt and adapted to make it work with SOS.
Note that broadcasting does not work.
"""
from scipy.signal import sosfilt_zi
    from scipy.signal._arraytools import odd_ext, even_ext, const_ext, axis_slice, axis_reverse
x = np.asarray(x)
if padlen is None:
edge = 0
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = sosfilt_zi(sos)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
#zi_shape = [1] * x.ndim
#zi_shape[axis] = zi.size
#zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
from scipy.signal import lti, cheby1, firwin
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=False):
"""
Downsample the signal by using a filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
zero_phase : bool
Prevent phase shift by filtering with ``filtfilt`` instead of ``lfilter``.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
Notes
-----
The ``zero_phase`` keyword was added in 0.17.0.
The possibility to use instances of ``lti`` as ``ftype`` was added in 0.17.0.
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if ftype == 'fir':
if n is None:
n = 30
system = lti(firwin(n + 1, 1. / q, window='hamming'), 1.)
elif ftype == 'iir':
if n is None:
n = 8
system = lti(*cheby1(n, 0.05, 0.8 / q))
else:
system = ftype
if zero_phase:
y = filtfilt(system.num, system.den, x, axis=axis)
else:
y = lfilter(system.num, system.den, x, axis=axis)
    sl = [slice(None)] * y.ndim
    sl[axis] = slice(None, None, q)
    return y[tuple(sl)] # Index with a tuple; indexing an ndarray with a list is deprecated.
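# Hedged usage sketch of :func:`decimate` (illustrative): downsample a
# 100 Hz tone from 8 kHz to 1 kHz with q=8 and the default IIR filter.
def _demo_decimate():
    fs = 8000
    x = np.sin(2.0 * np.pi * 100.0 * np.arange(fs) / fs)
    y = decimate(x, 8, zero_phase=True)
    print(len(x), len(y))  # -> 8000 1000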
def impulse_response_real_even(tf, ntaps):
"""The impulse response of a real and even frequency response is also real and even.
:param tf: Real and even frequency response. Only positive frequencies.
:param ntaps: Amount of taps.
:returns: A real and even (double-sided) impulse response with length `ntaps`.
A symmetric impulse response is needed. The center of symmetry determines the delay of the filter and thereby whether the filter is causal (delay>0, linear-phase) or non-causal (delay=0, linear-phase, zero-phase).
https://ccrma.stanford.edu/~jos/filters/Zero_Phase_Filters_Even_Impulse.html
"""
ir = np.fft.ifftshift(np.fft.irfft(tf, n=ntaps)).real
return ir
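# Illustrative check of :func:`impulse_response_real_even`: a flat
# single-sided magnitude response yields an impulse centred in the block,
# i.e. a symmetric, linear-phase set of taps.
def _demo_impulse_response_real_even():
    tf = np.ones(65)  # flat magnitude; 65 = 128 // 2 + 1 positive-frequency bins
    ir = impulse_response_real_even(tf, ntaps=128)
    print(np.argmax(ir))  # -> 64, the centre of the 128 taps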
__all__ = ['bandpass',
'bandpass_frequencies',
'bandpass_fractional_octaves',
'bandpass_octaves',
'bandpass_third_octaves',
'lowpass',
'highpass',
'octavepass',
'octave_filter',
'bandpass_filter',
'convolve',
'ir2fr',
'decibel_to_neper',
'neper_to_decibel',
'EqualBand',
'OctaveBand',
'ms',
'rms',
'normalize',
'window_scaling_factor',
'apply_window',
'amplitude_spectrum',
'auto_spectrum',
'power_spectrum',
'angle_spectrum',
'phase_spectrum',
'density_spectrum',
'integrate_bands',
'octaves',
'third_octaves',
'fractional_octaves',
'Filterbank',
'isolate',
'zero_crossings',
'amplitude_envelope',
'instantaneous_phase',
'instantaneous_frequency',
'wvd',
'decimate',
]
|
FRidh/python-acoustics
|
acoustics/signal.py
|
Python
|
bsd-3-clause
| 41,843
|
[
"Gaussian"
] |
625996ce54d8384791840bb21309f0b456c0dd32d41bf2f72e661a4e044d8c5f
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_url_maps import RegionUrlMapsClient
from google.cloud.compute_v1.services.region_url_maps import pagers
from google.cloud.compute_v1.services.region_url_maps import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RegionUrlMapsClient._get_default_mtls_endpoint(None) is None
assert (
RegionUrlMapsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
RegionUrlMapsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
RegionUrlMapsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionUrlMapsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionUrlMapsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class,transport_name", [(RegionUrlMapsClient, "rest"),]
)
def test_region_url_maps_client_from_service_account_info(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name", [(transports.RegionUrlMapsRestTransport, "rest"),]
)
def test_region_url_maps_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class,transport_name", [(RegionUrlMapsClient, "rest"),]
)
def test_region_url_maps_client_from_service_account_file(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_region_url_maps_client_get_transport_class():
transport = RegionUrlMapsClient.get_transport_class()
available_transports = [
transports.RegionUrlMapsRestTransport,
]
assert transport in available_transports
transport = RegionUrlMapsClient.get_transport_class("rest")
assert transport == transports.RegionUrlMapsRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest"),],
)
@mock.patch.object(
RegionUrlMapsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionUrlMapsClient),
)
def test_region_url_maps_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(RegionUrlMapsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(RegionUrlMapsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest", "true"),
(RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest", "false"),
],
)
@mock.patch.object(
RegionUrlMapsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionUrlMapsClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_url_maps_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [RegionUrlMapsClient])
@mock.patch.object(
RegionUrlMapsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionUrlMapsClient),
)
def test_region_url_maps_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
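

# Hedged sketch: the endpoint/cert-source resolution tested above can be queried
# directly, without constructing a client. The outcome depends on the
# GOOGLE_API_USE_CLIENT_CERTIFICATE and GOOGLE_API_USE_MTLS_ENDPOINT env vars.
def _example_resolve_mtls_endpoint():  # pragma: no cover - illustrative only
    api_endpoint, cert_source = RegionUrlMapsClient.get_mtls_endpoint_and_cert_source()
    return api_endpoint, cert_source
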
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest"),],
)
def test_region_url_maps_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)


@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[(RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest", None),],
)
def test_region_url_maps_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("request_type", [compute.DeleteRegionUrlMapRequest, dict,])
def test_delete_unary_rest(request_type):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"


def test_delete_unary_rest_required_fields(
request_type=compute.DeleteRegionUrlMapRequest,
):
transport_class = transports.RegionUrlMapsRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["url_map"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["urlMap"] = "url_map_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "urlMap" in jsonified_request
assert jsonified_request["urlMap"] == "url_map_value"
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We mock transcode() because supplying default values for the required
        # fields would make the real implementation fail, since the http_options
        # expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "delete",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params


def test_delete_unary_rest_unset_required_fields():
    transport = transports.RegionUrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
    )
unset_fields = transport.delete._get_unset_required_fields({})
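    # The optional query param ("requestId") intersected with the required
    # fields ("project", "region", "urlMap") is empty, so the expected result
    # below is the empty set.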
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "region", "urlMap",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionUrlMapsRestInterceptor(),
)
client = RegionUrlMapsClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "post_delete"
) as post, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "pre_delete"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.DeleteRegionUrlMapRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
        post.return_value = compute.Operation()
client.delete_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
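

# Hedged sketch: a user-defined interceptor implements the same pre_/post_ hooks
# the mocks above assert on. The class name is illustrative, not part of the
# library; the hook signatures mirror what the test drives through pre/post.
class _PassThroughRegionUrlMapsInterceptor(transports.RegionUrlMapsRestInterceptor):
    def pre_delete(self, request, metadata):
        # Runs before the HTTP request; may rewrite the request or metadata.
        return request, metadata

    def post_delete(self, response):
        # Runs after a successful response; may transform it before returning.
        return response
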
def test_delete_unary_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteRegionUrlMapRequest
):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_unary(request)


def test_delete_unary_rest_flattened():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"url_map": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", region="region_value", url_map="url_map_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.delete_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}"
% client.transport._host,
args[1],
)


def test_delete_unary_rest_flattened_error(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_unary(
compute.DeleteRegionUrlMapRequest(),
project="project_value",
region="region_value",
url_map="url_map_value",
)


def test_delete_unary_rest_error():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
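

# Hedged usage sketch of the flattened delete signature exercised above
# (placeholder project/region/url_map; assumes application default credentials
# are available at call time).
def _example_delete_unary():  # pragma: no cover - illustrative only
    client = RegionUrlMapsClient()
    return client.delete_unary(
        project="my-project", region="us-central1", url_map="my-url-map"
    )
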
@pytest.mark.parametrize("request_type", [compute.GetRegionUrlMapRequest, dict,])
def test_get_rest(request_type):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.UrlMap(
creation_timestamp="creation_timestamp_value",
default_service="default_service_value",
description="description_value",
fingerprint="fingerprint_value",
id=205,
kind="kind_value",
name="name_value",
region="region_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMap.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.UrlMap)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.default_service == "default_service_value"
assert response.description == "description_value"
assert response.fingerprint == "fingerprint_value"
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.region == "region_value"
assert response.self_link == "self_link_value"


def test_get_rest_required_fields(request_type=compute.GetRegionUrlMapRequest):
transport_class = transports.RegionUrlMapsRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["url_map"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["urlMap"] = "url_map_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "urlMap" in jsonified_request
assert jsonified_request["urlMap"] == "url_map_value"
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.UrlMap()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We mock transcode() because supplying default values for the required
        # fields would make the real implementation fail, since the http_options
        # expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMap.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params


def test_get_rest_unset_required_fields():
    transport = transports.RegionUrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
    )
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("project", "region", "urlMap",)))
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionUrlMapsRestInterceptor(),
)
client = RegionUrlMapsClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "post_get"
) as post, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "pre_get"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.UrlMap.to_json(compute.UrlMap())
request = compute.GetRegionUrlMapRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
        post.return_value = compute.UrlMap()
client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()


def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetRegionUrlMapRequest
):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)


def test_get_rest_flattened():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.UrlMap()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"url_map": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", region="region_value", url_map="url_map_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMap.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}"
% client.transport._host,
args[1],
)


def test_get_rest_flattened_error(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetRegionUrlMapRequest(),
project="project_value",
region="region_value",
url_map="url_map_value",
)


def test_get_rest_error():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
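

# Hedged usage sketch of the flattened get signature exercised above
# (placeholder names; assumes default credentials at call time).
def _example_get():  # pragma: no cover - illustrative only
    client = RegionUrlMapsClient()
    url_map = client.get(
        project="my-project", region="us-central1", url_map="my-url-map"
    )
    return url_map.default_service
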
@pytest.mark.parametrize("request_type", [compute.InsertRegionUrlMapRequest, dict,])
def test_insert_unary_rest(request_type):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["url_map_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": ["expose_headers_value_1", "expose_headers_value_2"],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"


def test_insert_unary_rest_required_fields(
request_type=compute.InsertRegionUrlMapRequest,
):
transport_class = transports.RegionUrlMapsRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We mock transcode() because supplying default values for the required
        # fields would make the real implementation fail, since the http_options
        # expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params


def test_insert_unary_rest_unset_required_fields():
    transport = transports.RegionUrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
    )
unset_fields = transport.insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "region", "urlMapResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionUrlMapsRestInterceptor(),
)
client = RegionUrlMapsClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "post_insert"
) as post, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "pre_insert"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.InsertRegionUrlMapRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
        post.return_value = compute.Operation()
client.insert_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()


def test_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.InsertRegionUrlMapRequest
):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["url_map_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": ["expose_headers_value_1", "expose_headers_value_2"],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert_unary(request)


def test_insert_unary_rest_flattened():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
url_map_resource=compute.UrlMap(
creation_timestamp="creation_timestamp_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/urlMaps"
% client.transport._host,
args[1],
)


def test_insert_unary_rest_flattened_error(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert_unary(
compute.InsertRegionUrlMapRequest(),
project="project_value",
region="region_value",
url_map_resource=compute.UrlMap(
creation_timestamp="creation_timestamp_value"
),
)


def test_insert_unary_rest_error():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
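

# Hedged usage sketch of the flattened insert signature exercised above; the
# UrlMap fields shown are placeholders mirroring mock_args, not a fixture.
def _example_insert_unary():  # pragma: no cover - illustrative only
    client = RegionUrlMapsClient()
    return client.insert_unary(
        project="my-project",
        region="us-central1",
        url_map_resource=compute.UrlMap(name="my-url-map"),
    )
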
@pytest.mark.parametrize("request_type", [compute.ListRegionUrlMapsRequest, dict,])
def test_list_rest(request_type):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.UrlMapList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMapList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"


def test_list_rest_required_fields(request_type=compute.ListRegionUrlMapsRequest):
transport_class = transports.RegionUrlMapsRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
("filter", "max_results", "order_by", "page_token", "return_partial_success",)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.UrlMapList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We mock transcode() because supplying default values for the required
        # fields would make the real implementation fail, since the http_options
        # expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMapList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params


def test_list_rest_unset_required_fields():
    transport = transports.RegionUrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
    )
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionUrlMapsRestInterceptor(),
)
client = RegionUrlMapsClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "post_list"
) as post, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "pre_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.UrlMapList.to_json(compute.UrlMapList())
request = compute.ListRegionUrlMapsRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
        post.return_value = compute.UrlMapList()
client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()


def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListRegionUrlMapsRequest
):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)


def test_list_rest_flattened():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.UrlMapList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", region="region_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMapList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/urlMaps"
% client.transport._host,
args[1],
)


def test_list_rest_flattened_error(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListRegionUrlMapsRequest(),
project="project_value",
region="region_value",
)


def test_list_rest_pager(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.UrlMapList(
items=[compute.UrlMap(), compute.UrlMap(), compute.UrlMap(),],
next_page_token="abc",
),
compute.UrlMapList(items=[], next_page_token="def",),
compute.UrlMapList(items=[compute.UrlMap(),], next_page_token="ghi",),
compute.UrlMapList(items=[compute.UrlMap(), compute.UrlMap(),],),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.UrlMapList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1", "region": "sample2"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.UrlMap) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
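

# Hedged sketch: iterating the pager returned by list() fetches successive
# pages lazily via next_page_token, as the multi-page responses above verify.
def _example_list_pages():  # pragma: no cover - illustrative only
    client = RegionUrlMapsClient()
    for url_map in client.list(project="my-project", region="us-central1"):
        print(url_map.name)
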
@pytest.mark.parametrize("request_type", [compute.PatchRegionUrlMapRequest, dict,])
def test_patch_unary_rest(request_type):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request_init["url_map_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": ["expose_headers_value_1", "expose_headers_value_2"],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"


def test_patch_unary_rest_required_fields(
request_type=compute.PatchRegionUrlMapRequest,
):
transport_class = transports.RegionUrlMapsRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["url_map"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).patch._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["urlMap"] = "url_map_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).patch._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "urlMap" in jsonified_request
assert jsonified_request["urlMap"] == "url_map_value"
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because the real implementation would
        # reject the placeholder default values used here whenever the
        # http_options expect actual values for the required fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A URI without fields and an empty body force all the request
            # fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "patch",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
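# For orientation: the transcode() result mocked above mirrors the dict shape
# that google.api_core.path_template.transcode produces: the HTTP method, the
# expanded URI, an optional body, and the remaining request fields as query
# parameters. The keys below restate that contract as an assumption, not as a
# call into the real implementation:
_example_transcode_result = {
    "method": "patch",          # HTTP verb from the matched http rule
    "uri": "v1/sample_method",  # URI template with path params expanded
    "body": {},                 # body fields, when the rule declares a body
    "query_params": {},         # everything else becomes query parameters
}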
def test_patch_unary_rest_unset_required_fields():
transport = transports.RegionUrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.patch._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "region", "urlMap", "urlMapResource",))
)
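# Sanity sketch of the set algebra asserted above: the optional fields are
# intersected with the required fields, and for patch that intersection is
# empty, so no required field may be reported as unset.
assert {"requestId"} & {"project", "region", "urlMap", "urlMapResource"} == set()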
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_patch_unary_rest_interceptors(null_interceptor):
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionUrlMapsRestInterceptor(),
)
client = RegionUrlMapsClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "post_patch"
) as post, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "pre_patch"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.PatchRegionUrlMapRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.patch_unary(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
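# Illustrative sketch of a concrete interceptor: subclass the REST
# interceptor and override the pre/post hooks patched in the test above.
# The hook signatures shown here are inferred from how the test wires
# `pre.return_value` and `post.return_value`, so treat them as assumptions.
class _EchoRegionUrlMapsInterceptor(transports.RegionUrlMapsRestInterceptor):
    def pre_patch(self, request, metadata):
        # Inspect or rewrite the request/metadata before the HTTP call.
        return request, metadata

    def post_patch(self, response):
        # Inspect or rewrite the operation returned by the HTTP call.
        return response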
def test_patch_unary_rest_bad_request(
transport: str = "rest", request_type=compute.PatchRegionUrlMapRequest
):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request_init["url_map_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": ["expose_headers_value_1", "expose_headers_value_2"],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.patch_unary(request)
def test_patch_unary_rest_flattened():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"url_map": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
url_map="url_map_value",
url_map_resource=compute.UrlMap(
creation_timestamp="creation_timestamp_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.patch_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}"
% client.transport._host,
args[1],
)
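# path_template.validate, used above, checks a concrete path against a URI
# template. A small sketch of the call shape (never executed here; the
# template and path values are made up for illustration):
def _path_template_validate_sketch():
    tmpl = "v1/projects/{project}/regions/{region}/urlMaps/{url_map}"
    return path_template.validate(tmpl, "v1/projects/p/regions/r/urlMaps/m")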
def test_patch_unary_rest_flattened_error(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch_unary(
compute.PatchRegionUrlMapRequest(),
project="project_value",
region="region_value",
url_map="url_map_value",
url_map_resource=compute.UrlMap(
creation_timestamp="creation_timestamp_value"
),
)
def test_patch_unary_rest_error():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.UpdateRegionUrlMapRequest, dict,])
def test_update_unary_rest(request_type):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request_init["url_map_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": ["expose_headers_value_1", "expose_headers_value_2"],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.update_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_update_unary_rest_required_fields(
request_type=compute.UpdateRegionUrlMapRequest,
):
transport_class = transports.RegionUrlMapsRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["url_map"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).update._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["urlMap"] = "url_map_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).update._get_unset_required_fields(jsonified_request)
    # Check that path and body parameters are not mixed into the query parameters.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "urlMap" in jsonified_request
assert jsonified_request["urlMap"] == "url_map_value"
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because the real implementation would
        # reject the placeholder default values used here whenever the
        # http_options expect actual values for the required fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A URI without fields and an empty body force all the request
            # fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "put",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.update_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_update_unary_rest_unset_required_fields():
transport = transports.RegionUrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.update._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "region", "urlMap", "urlMapResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_update_unary_rest_interceptors(null_interceptor):
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionUrlMapsRestInterceptor(),
)
client = RegionUrlMapsClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "post_update"
) as post, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "pre_update"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.UpdateRegionUrlMapRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.update_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_update_unary_rest_bad_request(
transport: str = "rest", request_type=compute.UpdateRegionUrlMapRequest
):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request_init["url_map_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": ["expose_headers_value_1", "expose_headers_value_2"],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.update_unary(request)
def test_update_unary_rest_flattened():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"url_map": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
url_map="url_map_value",
url_map_resource=compute.UrlMap(
creation_timestamp="creation_timestamp_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.update_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}"
% client.transport._host,
args[1],
)
def test_update_unary_rest_flattened_error(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_unary(
compute.UpdateRegionUrlMapRequest(),
project="project_value",
region="region_value",
url_map="url_map_value",
url_map_resource=compute.UrlMap(
creation_timestamp="creation_timestamp_value"
),
)
def test_update_unary_rest_error():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.ValidateRegionUrlMapRequest, dict,])
def test_validate_rest(request_type):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request_init["region_url_maps_validate_request_resource"] = {
"resource": {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": [
"expose_headers_value_1",
"expose_headers_value_2",
],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.UrlMapsValidateResponse()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMapsValidateResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.validate(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.UrlMapsValidateResponse)
def test_validate_rest_required_fields(
request_type=compute.ValidateRegionUrlMapRequest,
):
transport_class = transports.RegionUrlMapsRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["url_map"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).validate._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["urlMap"] = "url_map_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).validate._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "urlMap" in jsonified_request
assert jsonified_request["urlMap"] == "url_map_value"
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.UrlMapsValidateResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because the real implementation would
        # reject the placeholder default values used here whenever the
        # http_options expect actual values for the required fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A URI without fields and an empty body force all the request
            # fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMapsValidateResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.validate(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_validate_rest_unset_required_fields():
transport = transports.RegionUrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.validate._get_unset_required_fields({})
assert set(unset_fields) == (
set(())
& set(("project", "region", "regionUrlMapsValidateRequestResource", "urlMap",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_validate_rest_interceptors(null_interceptor):
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionUrlMapsRestInterceptor(),
)
client = RegionUrlMapsClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "post_validate"
) as post, mock.patch.object(
transports.RegionUrlMapsRestInterceptor, "pre_validate"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.UrlMapsValidateResponse.to_json(
compute.UrlMapsValidateResponse()
)
request = compute.ValidateRegionUrlMapRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.UrlMapsValidateResponse
client.validate(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_validate_rest_bad_request(
transport: str = "rest", request_type=compute.ValidateRegionUrlMapRequest
):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
request_init["region_url_maps_validate_request_resource"] = {
"resource": {
"creation_timestamp": "creation_timestamp_value",
"default_route_action": {
"cors_policy": {
"allow_credentials": True,
"allow_headers": ["allow_headers_value_1", "allow_headers_value_2"],
"allow_methods": ["allow_methods_value_1", "allow_methods_value_2"],
"allow_origin_regexes": [
"allow_origin_regexes_value_1",
"allow_origin_regexes_value_2",
],
"allow_origins": ["allow_origins_value_1", "allow_origins_value_2"],
"disabled": True,
"expose_headers": [
"expose_headers_value_1",
"expose_headers_value_2",
],
"max_age": 722,
},
"fault_injection_policy": {
"abort": {"http_status": 1219, "percentage": 0.10540000000000001},
"delay": {
"fixed_delay": {"nanos": 543, "seconds": 751},
"percentage": 0.10540000000000001,
},
},
"max_stream_duration": {},
"request_mirror_policy": {"backend_service": "backend_service_value"},
"retry_policy": {
"num_retries": 1197,
"per_try_timeout": {},
"retry_conditions": [
"retry_conditions_value_1",
"retry_conditions_value_2",
],
},
"timeout": {},
"url_rewrite": {
"host_rewrite": "host_rewrite_value",
"path_prefix_rewrite": "path_prefix_rewrite_value",
},
"weighted_backend_services": [
{
"backend_service": "backend_service_value",
"header_action": {
"request_headers_to_add": [
{
"header_name": "header_name_value",
"header_value": "header_value_value",
"replace": True,
}
],
"request_headers_to_remove": [
"request_headers_to_remove_value_1",
"request_headers_to_remove_value_2",
],
"response_headers_to_add": {},
"response_headers_to_remove": [
"response_headers_to_remove_value_1",
"response_headers_to_remove_value_2",
],
},
"weight": 648,
}
],
},
"default_service": "default_service_value",
"default_url_redirect": {
"host_redirect": "host_redirect_value",
"https_redirect": True,
"path_redirect": "path_redirect_value",
"prefix_redirect": "prefix_redirect_value",
"redirect_response_code": "redirect_response_code_value",
"strip_query": True,
},
"description": "description_value",
"fingerprint": "fingerprint_value",
"header_action": {},
"host_rules": [
{
"description": "description_value",
"hosts": ["hosts_value_1", "hosts_value_2"],
"path_matcher": "path_matcher_value",
}
],
"id": 205,
"kind": "kind_value",
"name": "name_value",
"path_matchers": [
{
"default_route_action": {},
"default_service": "default_service_value",
"default_url_redirect": {},
"description": "description_value",
"header_action": {},
"name": "name_value",
"path_rules": [
{
"paths": ["paths_value_1", "paths_value_2"],
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
"route_rules": [
{
"description": "description_value",
"header_action": {},
"match_rules": [
{
"full_path_match": "full_path_match_value",
"header_matches": [
{
"exact_match": "exact_match_value",
"header_name": "header_name_value",
"invert_match": True,
"prefix_match": "prefix_match_value",
"present_match": True,
"range_match": {
"range_end": 931,
"range_start": 1178,
},
"regex_match": "regex_match_value",
"suffix_match": "suffix_match_value",
}
],
"ignore_case": True,
"metadata_filters": [
{
"filter_labels": [
{
"name": "name_value",
"value": "value_value",
}
],
"filter_match_criteria": "filter_match_criteria_value",
}
],
"prefix_match": "prefix_match_value",
"query_parameter_matches": [
{
"exact_match": "exact_match_value",
"name": "name_value",
"present_match": True,
"regex_match": "regex_match_value",
}
],
"regex_match": "regex_match_value",
}
],
"priority": 898,
"route_action": {},
"service": "service_value",
"url_redirect": {},
}
],
}
],
"region": "region_value",
"self_link": "self_link_value",
"tests": [
{
"description": "description_value",
"expected_output_url": "expected_output_url_value",
"expected_redirect_response_code": 3275,
"headers": [{"name": "name_value", "value": "value_value"}],
"host": "host_value",
"path": "path_value",
"service": "service_value",
}
],
}
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.validate(request)
def test_validate_rest_flattened():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.UrlMapsValidateResponse()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"url_map": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
url_map="url_map_value",
region_url_maps_validate_request_resource=compute.RegionUrlMapsValidateRequest(
resource=compute.UrlMap(creation_timestamp="creation_timestamp_value")
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.UrlMapsValidateResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.validate(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}/validate"
% client.transport._host,
args[1],
)
def test_validate_rest_flattened_error(transport: str = "rest"):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.validate(
compute.ValidateRegionUrlMapRequest(),
project="project_value",
region="region_value",
url_map="url_map_value",
region_url_maps_validate_request_resource=compute.RegionUrlMapsValidateRequest(
resource=compute.UrlMap(creation_timestamp="creation_timestamp_value")
),
)
def test_validate_rest_error():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionUrlMapsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionUrlMapsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionUrlMapsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionUrlMapsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RegionUrlMapsRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RegionUrlMapsClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.RegionUrlMapsRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_region_url_maps_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RegionUrlMapsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_region_url_maps_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.region_url_maps.transports.RegionUrlMapsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.RegionUrlMapsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"insert",
"list",
"patch",
"update",
"validate",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
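# The base transport exercised above is essentially a bundle of stubs: every
# RPC raises NotImplementedError until a concrete transport (such as the REST
# transport) overrides it. A minimal sketch of the idea, not the generated
# class:
class _StubTransportSketch:
    def patch(self, request):
        raise NotImplementedError()

    def close(self):
        raise NotImplementedError()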
def test_region_url_maps_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.region_url_maps.transports.RegionUrlMapsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionUrlMapsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_region_url_maps_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.region_url_maps.transports.RegionUrlMapsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionUrlMapsTransport()
adc.assert_called_once()
def test_region_url_maps_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RegionUrlMapsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_region_url_maps_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.RegionUrlMapsRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_url_maps_host_no_port(transport_name):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_url_maps_host_with_port(transport_name):
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = RegionUrlMapsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = RegionUrlMapsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RegionUrlMapsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = RegionUrlMapsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = RegionUrlMapsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RegionUrlMapsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = RegionUrlMapsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = RegionUrlMapsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RegionUrlMapsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = RegionUrlMapsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = RegionUrlMapsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RegionUrlMapsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = RegionUrlMapsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = RegionUrlMapsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RegionUrlMapsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.RegionUrlMapsTransport, "_prep_wrapped_messages"
) as prep:
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.RegionUrlMapsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = RegionUrlMapsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = RegionUrlMapsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(RegionUrlMapsClient, transports.RegionUrlMapsRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-compute
|
tests/unit/gapic/compute_v1/test_region_url_maps.py
|
Python
|
apache-2.0
| 172,085
|
[
"Octopus"
] |
05892f2cbbeae724fcb4c5500a05e7474e0e4662a03485b18fe478fe21c251f7
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
import time
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.problem import ProblemPage
from ...pages.common.logout import LogoutPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class CoursewareTest(UniqueCourseTest):
"""
Test courseware.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(CoursewareTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('problem', 'Test Problem 2')
)
)
).install()
# Auto-auth register for the course.
self._auto_auth(self.USERNAME, self.EMAIL, False)
def _goto_problem_page(self):
"""
Open problem page with assertion.
"""
self.courseware_page.visit()
self.problem_page = ProblemPage(self.browser)
self.assertEqual(self.problem_page.problem_name, 'TEST PROBLEM 1')
def _change_problem_release_date_in_studio(self):
"""
"""
self.course_outline.q(css=".subsection-header-actions .configure-button").first.click()
self.course_outline.q(css="#start_date").fill("01/01/2030")
self.course_outline.q(css=".action-save").first.click()
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def test_courseware(self):
"""
Test courseware when the most recently visited subsection is no longer released.
"""
# Visit problem page as a student.
self._goto_problem_page()
# Logout and login as a staff user.
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
# Visit course outline page in studio.
self.course_outline.visit()
# Set release date for subsection in future.
self._change_problem_release_date_in_studio()
# Wait for 2 seconds to save new date.
time.sleep(2)
# Logout and login as a student.
LogoutPage(self.browser).visit()
self._auto_auth(self.USERNAME, self.EMAIL, False)
# Visit courseware as a student.
self.courseware_page.visit()
# Problem name should be "TEST PROBLEM 2".
self.assertEqual(self.problem_page.problem_name, 'TEST PROBLEM 2')
|
olexiim/edx-platform
|
common/test/acceptance/tests/lms/test_lms_courseware.py
|
Python
|
agpl-3.0
| 3,652
|
[
"VisIt"
] |
aa48ccbd453221949ffb3f6d368424c460c369a91df980fb49fcb30746c15b04
|
from __future__ import division, print_function, absolute_import
import numpy as np
import warnings
from dipy.utils.six.moves import xrange
from dipy.core.geometry import cart2sphere, sphere2cart, vector_norm
from dipy.core.onetime import auto_attr
from dipy.reconst.recspeed import remove_similar_vertices
__all__ = ['Sphere', 'HemiSphere', 'faces_from_sphere_vertices',
'unique_edges']
def _all_specified(*args):
for a in args:
if a is None:
return False
return True
def _some_specified(*args):
for a in args:
if a is not None:
return True
return False
def faces_from_sphere_vertices(vertices):
"""
Triangulate a set of vertices on the sphere.
Parameters
----------
vertices : (M, 3) ndarray
XYZ coordinates of vertices on the sphere.
Returns
-------
faces : (N, 3) ndarray
Indices into vertices; forms triangular faces.
"""
from scipy.spatial import Delaunay
faces = Delaunay(vertices).convex_hull
if len(vertices) < 2**16:
return np.asarray(faces, np.uint16)
else:
return faces
def unique_edges(faces, return_mapping=False):
"""Extract all unique edges from given triangular faces.
Parameters
----------
faces : (N, 3) ndarray
Vertex indices forming triangular faces.
return_mapping : bool
If true, a mapping to the edges of each face is returned.
Returns
-------
edges : (N, 2) ndarray
Unique edges.
mapping : (N, 3)
For each face, [x, y, z], a mapping to its edges [a, b, c].
::
y
/\
/ \
a/ \b
/ \
/ \
/__________\
x c z
"""
faces = np.asarray(faces)
edges = np.concatenate([faces[:, 0:2], faces[:, 1:3], faces[:, ::2]])
if return_mapping:
ue, inverse = unique_sets(edges, return_inverse=True)
return ue, inverse.reshape((3, -1)).T
else:
return unique_sets(edges)
def unique_sets(sets, return_inverse=False):
"""Remove duplicate sets.
Parameters
----------
sets : array (N, k)
N sets of size k.
return_inverse : bool
If True, also returns the indices of unique_sets that can be used
to reconstruct `sets` (the original ordering of each set may not be
preserved).
Returns
-------
unique_sets : array
Unique sets.
inverse : array (N,)
The indices to reconstruct `sets` from `unique_sets`.
"""
sets = np.sort(sets, 1)
order = np.lexsort(sets.T)
sets = sets[order]
flag = np.ones(len(sets), 'bool')
flag[1:] = (sets[1:] != sets[:-1]).any(-1)
uniqsets = sets[flag]
if return_inverse:
inverse = np.empty_like(order)
inverse[order] = np.arange(len(order))
index = flag.cumsum() - 1
return uniqsets, index[inverse]
else:
return uniqsets
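# Illustrative sketch (not part of the original module): two triangles that
# share the edge (0, 2) yield five unique edges rather than six. Uses only
# numpy and the functions defined above.
def _example_unique_edges():
    faces = np.array([[0, 1, 2], [0, 2, 3]])
    edges = unique_edges(faces)
    assert len(edges) == 5
    # With return_mapping=True, each face also gets indices into `edges`.
    edges, mapping = unique_edges(faces, return_mapping=True)
    assert mapping.shape == (2, 3)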
class Sphere(object):
"""Points on the unit sphere.
The sphere can be constructed using one of three conventions::
Sphere(x, y, z)
Sphere(xyz=xyz)
Sphere(theta=theta, phi=phi)
Parameters
----------
x, y, z : 1-D array_like
Vertices as x-y-z coordinates.
theta, phi : 1-D array_like
Vertices as spherical coordinates. Theta and phi are the inclination
and azimuth angles respectively.
xyz : (N, 3) ndarray
Vertices as x-y-z coordinates.
faces : (N, 3) ndarray
Indices into vertices that form triangular faces. If unspecified,
the faces are computed using a Delaunay triangulation.
edges : (N, 2) ndarray
Edges between vertices. If unspecified, the edges are
derived from the faces.
"""
def __init__(self, x=None, y=None, z=None,
theta=None, phi=None,
xyz=None,
faces=None, edges=None):
all_specified = _all_specified(x, y, z) + _all_specified(xyz) + \
_all_specified(theta, phi)
one_complete = (_some_specified(x, y, z) + _some_specified(xyz) +
_some_specified(theta, phi))
if not (all_specified == 1 and one_complete == 1):
raise ValueError("Sphere must be constructed using either "
"(x,y,z), (theta, phi) or xyz.")
if edges is not None and faces is None:
raise ValueError("Either specify both faces and "
"edges, only faces, or neither.")
if edges is not None:
self.edges = np.asarray(edges)
if faces is not None:
self.faces = np.asarray(faces)
if theta is not None:
self.theta = np.array(theta, copy=False, ndmin=1)
self.phi = np.array(phi, copy=False, ndmin=1)
return
if xyz is not None:
xyz = np.asarray(xyz)
x, y, z = xyz.T
x, y, z = (np.asarray(t) for t in (x, y, z))
r, self.theta, self.phi = cart2sphere(x, y, z)
if not np.allclose(r, 1):
warnings.warn("Vertices are not on the unit sphere.")
@auto_attr
def vertices(self):
return np.column_stack(sphere2cart(1, self.theta, self.phi))
@property
def x(self):
return self.vertices[:, 0]
@property
def y(self):
return self.vertices[:, 1]
@property
def z(self):
return self.vertices[:, 2]
@auto_attr
def faces(self):
faces = faces_from_sphere_vertices(self.vertices)
return faces
@auto_attr
def edges(self):
return unique_edges(self.faces)
def subdivide(self, n=1):
"""Subdivides each face of the sphere into four new faces.
New vertices are created at a, b, and c. Then each face [x, y, z] is
divided into faces [x, a, c], [y, a, b], [z, b, c], and [a, b, c].
::
y
/\
/ \
a/____\b
/\ /\
/ \ / \
/____\/____\
x c z
Parameters
----------
n : int, optional
The number of subdivisions to perform.
Returns
-------
new_sphere : Sphere
The subdivided sphere.
"""
vertices = self.vertices
faces = self.faces
for i in xrange(n):
edges, mapping = unique_edges(faces, return_mapping=True)
new_vertices = vertices[edges].sum(1)
new_vertices /= vector_norm(new_vertices, keepdims=True)
mapping += len(vertices)
vertices = np.vstack([vertices, new_vertices])
x, y, z = faces.T
a, b, c = mapping.T
face1 = np.column_stack([x, a, c])
face2 = np.column_stack([y, b, a])
face3 = np.column_stack([z, c, b])
face4 = mapping
faces = np.concatenate([face1, face2, face3, face4])
if len(vertices) < 2**16:
faces = np.asarray(faces, dtype='uint16')
return Sphere(xyz=vertices, faces=faces)
def find_closest(self, xyz):
"""
Find the index of the vertex in the Sphere closest to the input vector
Parameters
----------
xyz : array-like, 3 elements
A unit vector
Returns
-------
idx : int
The index into the Sphere.vertices array that gives the closest
vertex (in angle).
"""
cos_sim = np.dot(self.vertices, xyz)
return np.argmax(cos_sim)
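# Illustrative sketch (not part of the original module): the three
# construction conventions are equivalent, subdivision quadruples the face
# count, and find_closest returns the nearest vertex by angle
# (unit_octahedron is defined near the bottom of this module).
def _example_sphere_usage():
    s1 = Sphere(x=[1.0, 0.0], y=[0.0, 1.0], z=[0.0, 0.0])
    s2 = Sphere(xyz=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]))
    s3 = Sphere(theta=s1.theta, phi=s1.phi)
    assert np.allclose(s1.vertices, s2.vertices)
    assert np.allclose(s1.vertices, s3.vertices)
    # Each subdivision splits every face into four.
    subdivided = unit_octahedron.subdivide()
    assert len(subdivided.faces) == 4 * len(unit_octahedron.faces)
    # The closest octahedron vertex to a vector near +x is (1, 0, 0).
    idx = unit_octahedron.find_closest([0.9, 0.1, 0.0])
    assert np.allclose(unit_octahedron.vertices[idx], [1.0, 0.0, 0.0])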
class HemiSphere(Sphere):
"""Points on the unit sphere.
A HemiSphere is similar to a Sphere but it takes antipodal symmetry into
account. Antipodal symmetry means that point v on a HemiSphere is the same
as the point -v. Duplicate points are discarded when constructing a
HemiSphere (including antipodal duplicates). `edges` and `faces` are
remapped to the remaining points as closely as possible.
The HemiSphere can be constructed using one of three conventions::
HemiSphere(x, y, z)
HemiSphere(xyz=xyz)
HemiSphere(theta=theta, phi=phi)
Parameters
----------
x, y, z : 1-D array_like
Vertices as x-y-z coordinates.
theta, phi : 1-D array_like
Vertices as spherical coordinates. Theta and phi are the inclination
and azimuth angles respectively.
xyz : (N, 3) ndarray
Vertices as x-y-z coordinates.
faces : (N, 3) ndarray
Indices into vertices that form triangular faces. If unspecified,
the faces are computed using a Delaunay triangulation.
edges : (N, 2) ndarray
Edges between vertices. If unspecified, the edges are
derived from the faces.
tol : float
Angle in degrees. Vertices that are less than tol degrees apart are
treated as duplicates.
See Also
--------
Sphere
"""
def __init__(self, x=None, y=None, z=None,
theta=None, phi=None,
xyz=None,
faces=None, edges=None, tol=1e-5):
"""Create a HemiSphere from points"""
sphere = Sphere(x=x, y=y, z=z, theta=theta, phi=phi, xyz=xyz)
uniq_vertices, mapping = remove_similar_vertices(sphere.vertices, tol,
return_mapping=True)
uniq_vertices *= 1 - 2*(uniq_vertices[:, -1:] < 0)
if faces is not None:
faces = np.asarray(faces)
faces = unique_sets(mapping[faces])
if edges is not None:
edges = np.asarray(edges)
edges = unique_sets(mapping[edges])
Sphere.__init__(self, xyz=uniq_vertices, edges=edges, faces=faces)
@classmethod
def from_sphere(klass, sphere, tol=1e-5):
"""Create instance from a Sphere"""
return klass(theta=sphere.theta, phi=sphere.phi,
edges=sphere.edges, faces=sphere.faces, tol=tol)
def mirror(self):
"""Create a full Sphere from a HemiSphere"""
n = len(self.vertices)
vertices = np.vstack([self.vertices, -self.vertices])
edges = np.vstack([self.edges, n + self.edges])
_switch_vertex(edges[:, 0], edges[:, 1], vertices)
faces = np.vstack([self.faces, n + self.faces])
_switch_vertex(faces[:, 0], faces[:, 1], vertices)
_switch_vertex(faces[:, 0], faces[:, 2], vertices)
return Sphere(xyz=vertices, edges=edges, faces=faces)
@auto_attr
def faces(self):
vertices = np.vstack([self.vertices, -self.vertices])
faces = faces_from_sphere_vertices(vertices)
return unique_sets(faces % len(self.vertices))
def subdivide(self, n=1):
"""Create a more subdivided HemiSphere
See Sphere.subdivide for full documentation.
"""
sphere = self.mirror()
sphere = sphere.subdivide(n)
return HemiSphere.from_sphere(sphere)
def find_closest(self, xyz):
"""
Find the index of the vertex in the Sphere closest to the input vector,
taking into account antipodal symmetry
Parameters
----------
xyz : array-like, 3 elements
A unit vector
Returns
-------
idx : int
The index into the Sphere.vertices array that gives the closest
vertex (in angle).
"""
cos_sim = abs(np.dot(self.vertices, xyz))
return np.argmax(cos_sim)
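# Illustrative sketch (not part of the original module): antipodal duplicates
# collapse to a single point, and find_closest respects antipodal symmetry.
def _example_hemisphere():
    verts = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]])
    hemi = HemiSphere(xyz=verts)
    assert len(hemi.vertices) == 2
    idx = hemi.find_closest([0.0, 0.0, -1.0])
    assert np.allclose(np.abs(hemi.vertices[idx]), [0.0, 0.0, 1.0])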
def _switch_vertex(index1, index2, vertices):
"""When we mirror an edge (a, b). We can either create (a, b) and (a', b')
OR (a, b') and (a', b). The angles of edges (a, b) and (a, b') are
supplementary, so we choose the two new edges such that their angles are
less than 90 degrees.
"""
n = len(vertices)
A = vertices[index1]
B = vertices[index2]
is_far = (A * B).sum(-1) < 0
index2[is_far] = index2[is_far] + (n // 2)
index2 %= n
def _get_forces(charges):
r"""Given a set of charges on the surface of the sphere gets total force
those charges exert on each other.
The force exerted by one charge on another is given by Coulomb's law. For
this simulation we use charges of equal magnitude so this force can be
written as $\vec{r}/r^3$, up to a constant factor, where $\vec{r}$ is the
separation of the two charges and $r$ is the magnitude of $\vec{r}$. Forces
are additive so the total force on each of the charges is the sum of the
force exerted by each other charge in the system. Charges do not exert a
force on themselves. The electric potential can similarly be written as
$1/r$ and is also additive.
"""
all_charges = np.concatenate((charges, -charges))
all_charges = all_charges[:, None]
r = charges - all_charges
r_mag = np.sqrt((r*r).sum(-1))[:, :, None]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
force = r / r_mag**3
potential = 1. / r_mag
d = np.arange(len(charges))
force[d, d] = 0
force = force.sum(0)
force_r_comp = (charges*force).sum(-1)[:, None]
f_theta = force - force_r_comp*charges
potential[d, d] = 0
potential = 2*potential.sum()
return f_theta, potential
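# Illustrative sketch (not part of the original module): the tangential force
# returned by _get_forces is orthogonal to each charge's position vector,
# because the radial component has been projected out.
def _example_get_forces():
    charges = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    f_theta, potential = _get_forces(charges)
    assert np.allclose((f_theta * charges).sum(-1), 0.0)
    assert potential > 0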
def disperse_charges(hemi, iters, const=.2):
"""Models electrostatic repulsion on the unit sphere
Places charges on a sphere and simulates the repulsive forces felt by each
one. Allows the charges to move for some number of iterations and returns
their final location as well as the total potential of the system at each
step.
Parameters
----------
hemi : HemiSphere
Points on a unit sphere.
iters : int
Number of iterations to run.
const : float
Using a smaller const could provide a more accurate result, but will
need more iterations to converge.
Returns
-------
hemi : HemiSphere
Distributed points on a unit sphere.
potential : ndarray
The electrostatic potential at each iteration. This can be useful to
check if the repulsion converged to a minimum.
Notes
-----
This function is meant to be used with diffusion imaging, so antipodal
symmetry is assumed. Therefore each charge must not only be unique, but if
there is a charge at +x, there cannot be a charge at -x. These are treated
as the same location, and because the distance between the two charges will
be zero, the result will be unstable.
"""
if not isinstance(hemi, HemiSphere):
raise ValueError("expecting HemiSphere")
charges = hemi.vertices
forces, v = _get_forces(charges)
force_mag = np.sqrt((forces*forces).sum())
const = const / force_mag.max()
potential = np.empty(iters)
v_min = v
for ii in xrange(iters):
new_charges = charges + forces * const
norms = np.sqrt((new_charges**2).sum(-1))
new_charges /= norms[:, None]
new_forces, v = _get_forces(new_charges)
if v <= v_min:
charges = new_charges
forces = new_forces
potential[ii] = v_min = v
else:
const /= 2.
potential[ii] = v_min
return HemiSphere(xyz=charges), potential
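# Illustrative sketch (not part of the original module): the recorded
# potential is non-increasing, because disperse_charges only accepts moves
# that do not raise the potential (hemi_icosahedron is defined at the bottom
# of this module).
def _example_disperse_charges():
    hemi, potential = disperse_charges(hemi_icosahedron, iters=20)
    assert np.all(np.diff(potential) <= 0)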
def interp_rbf(data, sphere_origin, sphere_target,
function='multiquadric', epsilon=None, smooth=0.1,
norm="angle"):
"""Interpolate data on the sphere, using radial basis functions.
Parameters
----------
data : (N,) ndarray
Function values on the unit sphere.
sphere_origin : Sphere
Positions of data values.
sphere_target : Sphere
M target positions for which to interpolate.
function : {'multiquadric', 'inverse', 'gaussian'}
Radial basis function.
epsilon : float
Radial basis function spread parameter. Defaults to approximate average
distance between nodes (a good start).
smooth : float
Values greater than zero increase the smoothness of the
approximation, with 0 as pure interpolation. Default: 0.1
norm : str
A string indicating the function that returns the
"distance" between two points.
'angle' - The angle between two vectors
'euclidean_norm' - The Euclidean distance
Returns
-------
v : (M,) ndarray
Interpolated values.
See Also
--------
scipy.interpolate.Rbf
"""
from scipy.interpolate import Rbf
def angle(x1, x2):
xx = np.arccos((x1 * x2).sum(axis=0))
xx[np.isnan(xx)] = 0
return xx
def euclidean_norm(x1, x2):
return np.sqrt(((x1 - x2)**2).sum(axis=0))
if norm == "angle":
norm = angle
elif norm == "euclidean_norm":
w_s = "The Eucldian norm used for interpolation is inaccurate "
w_s += "and will be deprecated in future versions. Please consider "
w_s += "using the 'angle' norm instead"
warnings.warn(w_s, DeprecationWarning)
norm = euclidean_norm
# Workaround for bug in older versions of SciPy that don't allow
# specification of epsilon None:
if epsilon is not None:
kwargs = {'function': function,
'epsilon': epsilon,
'smooth': smooth,
'norm': norm}
else:
kwargs = {'function': function,
'smooth': smooth,
'norm': norm}
rbfi = Rbf(sphere_origin.x, sphere_origin.y, sphere_origin.z, data,
**kwargs)
return rbfi(sphere_target.x, sphere_target.y, sphere_target.z)
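# Illustrative sketch (not part of the original module; requires scipy):
# interpolate a smooth function sampled on a coarse sphere onto a finer one.
def _example_interp_rbf():
    coarse = unit_octahedron
    fine = coarse.subdivide()
    data = coarse.z  # a smooth function of position on the sphere
    values = interp_rbf(data, coarse, fine)
    assert values.shape == (len(fine.vertices),)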
def euler_characteristic_check(sphere, chi=2):
r"""Checks the euler characteristic of a sphere
If $f$ = number of faces, $e$ = number_of_edges and $v$ = number of
vertices, the Euler formula says $f - e + v = 2$ for a mesh on a sphere. More
generally, this checks whether $f - e + v = \chi$, where $\chi$ is the Euler
characteristic of the mesh:
- Open chain (track) has $\chi=1$
- Closed chain (loop) has $\chi=0$
- Disk has $\chi=1$
- Sphere has $\chi=2$
- HemiSphere has $\chi=1$
Parameters
----------
sphere : Sphere
A Sphere instance with vertices, edges and faces attributes.
chi : int, optional
The Euler characteristic of the mesh to be checked
Returns
-------
check : bool
True if the mesh has Euler characteristic $\chi$
Examples
--------
>>> euler_characteristic_check(unit_octahedron)
True
>>> hemisphere = HemiSphere.from_sphere(unit_icosahedron)
>>> euler_characteristic_check(hemisphere, chi=1)
True
"""
v = sphere.vertices.shape[0]
e = sphere.edges.shape[0]
f = sphere.faces.shape[0]
return (f - e + v) == chi
octahedron_vertices = np.array(
[[1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, -1.0], ])
octahedron_faces = np.array(
[[0, 4, 2],
[1, 5, 3],
[4, 2, 1],
[5, 3, 0],
[1, 4, 3],
[0, 5, 2],
[0, 4, 3],
[1, 5, 2], ], dtype='uint16')
t = (1 + np.sqrt(5)) / 2
icosahedron_vertices = np.array(
[[t, 1, 0], # 0
[-t, 1, 0], # 1
[t, -1, 0], # 2
[-t, -1, 0], # 3
[1, 0, t], # 4
[1, 0, -t], # 5
[-1, 0, t], # 6
[-1, 0, -t], # 7
[0, t, 1], # 8
[0, -t, 1], # 9
[0, t, -1], # 10
[0, -t, -1], ]) # 11
icosahedron_vertices /= vector_norm(icosahedron_vertices, keepdims=True)
icosahedron_faces = np.array(
[[8, 4, 0],
[2, 5, 0],
[2, 5, 11],
[9, 2, 11],
[2, 4, 0],
[9, 2, 4],
[10, 8, 1],
[10, 8, 0],
[10, 5, 0],
[6, 3, 1],
[9, 6, 3],
[6, 8, 1],
[6, 8, 4],
[9, 6, 4],
[7, 10, 1],
[7, 10, 5],
[7, 3, 1],
[7, 3, 11],
[9, 3, 11],
[7, 5, 11], ], dtype='uint16')
unit_octahedron = Sphere(xyz=octahedron_vertices, faces=octahedron_faces)
unit_icosahedron = Sphere(xyz=icosahedron_vertices, faces=icosahedron_faces)
hemi_icosahedron = HemiSphere.from_sphere(unit_icosahedron)
|
StongeEtienne/dipy
|
dipy/core/sphere.py
|
Python
|
bsd-3-clause
| 20,288
|
[
"Gaussian"
] |
bbb30f81530144b80b174ae9a3b345118e130a52860b6bce869586db1b8393b9
|
# Copyright (C) 2012,2013,2015(H),2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************
espressopp.interaction.DihedralHarmonic
***************************************
The dihedral harmonic potential
.. math::
U(\phi_{ijkl}) = \frac{1}{2} K \left(\phi_{ijkl} - \phi_0\right)^2
where `K` is a force constant and the angles should be provided in radians.
Reference: Gromacs Manual 4.6.1, section 4.2.11 (page 79-80), equation 4.60
.. function:: espressopp.interaction.DihedralHarmonic(K, phi0)
:param K: (default: 0.0)
:param phi0: (default: 0.0)
:type K: real
:type phi0: real
.. function:: espressopp.interaction.FixedQuadrupleListDihedralHarmonic(system, fql, potential)
:param system:
:param fql:
:param potential:
:type system:
:type fql:
:type potential:
.. function:: espressopp.interaction.FixedQuadrupleListDihedralHarmonic.getFixedQuadrupleList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedQuadrupleListDihedralHarmonic.setPotential(potential)
:param potential:
:type potential:
**Example of usage**
>>> # The following example shows how to add a torsional potential to particles 1,2,3,4
>>> fql = espressopp.FixedQuadrupleList(system.storage)
>>> fql.addQuadruples([(1,2,3,4)])
>>> #phi0 is in radians, IUPAC convention definition
>>> interaction = espressopp.interaction.FixedQuadrupleListDihedralHarmonic(system,fql,potential=espressopp.interaction.DihedralHarmonic(K=1.0,phi0=0.0))
>>> system.addInteraction(interaction)
"""
# pylint: disable=W0401, W0614, W0212
from espressopp.esutil import *
from espressopp.interaction.DihedralPotential import *
from espressopp.interaction.Interaction import *
# pylint: disable=F0401
from _espressopp import interaction_DihedralHarmonic
from _espressopp import interaction_FixedQuadrupleListDihedralHarmonic
from _espressopp import interaction_FixedQuadrupleListTypesDihedralHarmonic
class DihedralHarmonicLocal(DihedralPotentialLocal, interaction_DihedralHarmonic):
def __init__(self, K=0.0, phi0=0.0):
# pylint: disable=W0212
if (not (pmi._PMIComm and pmi._PMIComm.isActive())
or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup()):
cxxinit(self, interaction_DihedralHarmonic, K, phi0)
class FixedQuadrupleListDihedralHarmonicLocal(
InteractionLocal,
interaction_FixedQuadrupleListDihedralHarmonic):
'The (local) DihedralHarmonic interaction using FixedQuadruple lists.'
def __init__(self, system, fql, potential):
if (not (pmi._PMIComm and pmi._PMIComm.isActive())
or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup()):
cxxinit(self, interaction_FixedQuadrupleListDihedralHarmonic, system, fql, potential)
def setPotential(self, potential):
if (not (pmi._PMIComm and pmi._PMIComm.isActive())
or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup()):
self.cxxclass.setPotential(self, potential)
def getFixedQuadrupleList(self):
if (not (pmi._PMIComm and pmi._PMIComm.isActive())
or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup()):
return self.cxxclass.getFixedQuadrupleList(self)
class FixedQuadrupleListTypesDihedralHarmonicLocal(InteractionLocal,
interaction_FixedQuadrupleListTypesDihedralHarmonic):
def __init__(self, system, fql):
if pmi.workerIsActive():
cxxinit(self, interaction_FixedQuadrupleListTypesDihedralHarmonic, system, fql)
def setPotential(self, type1, type2, type3, type4, potential):
if pmi.workerIsActive():
self.cxxclass.setPotential(self, type1, type2, type3, type4, potential)
def getPotential(self, type1, type2, type3, type4):
if pmi.workerIsActive():
return self.cxxclass.getPotential(self, type1, type2, type3, type4)
if pmi.isController:
class DihedralHarmonic(DihedralPotential):
'The DihedralHarmonic potential.'
pmiproxydefs = dict(
cls='espressopp.interaction.DihedralHarmonicLocal',
pmiproperty=['K', 'phi0']
)
class FixedQuadrupleListDihedralHarmonic(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls='espressopp.interaction.FixedQuadrupleListDihedralHarmonicLocal',
pmicall=['setPotential', 'getFixedQuadrupleList']
)
class FixedQuadrupleListTypesDihedralHarmonic(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedQuadrupleListTypesDihedralHarmonicLocal',
pmicall = ['setPotential','getPotential','setFixedQuadrupleList','getFixedQuadrupleList']
)
|
fedepad/espressopp
|
src/interaction/DihedralHarmonic.py
|
Python
|
gpl-3.0
| 5,374
|
[
"ESPResSo",
"Gromacs"
] |
72727b06d8acab67e08eec717368b9c47d3b9864206706d02ccb4b266ffe4de2
|
# BioLite - Tools for processing gene sequence data and automating workflows
# Copyright (c) 2012-2014 Brown University. All rights reserved.
#
# This file is part of BioLite.
#
# BioLite is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioLite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioLite. If not, see <http://www.gnu.org/licenses/>.
"""
A series of wrappers for external calls to various bioinformatics tools.
"""
import glob
import math
import operator
import os
import random
import shlex
import subprocess
import sys
import time
from collections import namedtuple
from itertools import chain
import config
import diagnostics
import utils
class BaseWrapper:
"""
A base class that handles generic wrapper functionality.
Wrappers for specific programs should inherit this class, call `self.init`
to specify their `name` (which is a key into the executable entries in the
BioLite configuration file), and append their arguments to the `self.args`
list.
By convention, a wrapper should call `self.run()` as the final line in its
`__init__` function. This allows for clean syntax and use of the wrapper
directly, without assigning it to a variable name, e.g.
wrappers.MyWrapper(arg1, arg2, ...)
When your wrapper runs, BaseWrapper will do the following:
* log the complete command line to diagnostics;
* optionally call the program with a version flag (invoked with `version`)
to obtain a version string, then log this to the :ref:`programs-table`
along with a hash of the binary executable file;
* append the command's stderr to a file called `name`.log in the CWD;
* also append the command's stdout to the same log file, unless you set
`self.stdout`, in which case stdout is redirected to a file of that name;
* on Linux, add a memory profiling library to the LD_PRELOAD environment
variable;
* call the command and check its return code (which should be 0 on success,
unless you specify a different code with `self.return_ok`), optionally
using the CWD specified in `self.cwd` or the environment specified in
`self.env`.
* parse the stderr of the command to find [biolite.profile] markers and
use the rusage values from `utils.safe_call` to populate a profile
entity in the diagnostics with walltime, usertime, systime, mem, and
vmem attributes.
"""
def __init__(self, name, **kwargs):
self.name = name
self.shell = '/bin/sh'
self.cmd = config.get_command(name)
self.args = []
self.return_ok = kwargs.get('return_ok', 0)
self.cwd = kwargs.get('cwd', os.getcwd())
self.stdout = kwargs.get('stdout')
self.stdout_append = kwargs.get('stdout_append')
self.pipe = kwargs.get('pipe')
self.env = os.environ.copy()
self.max_concurrency = kwargs.get('max_concurrency', sys.maxint)
self.output_patterns = None
init = __init__
"""A shortcut for calling the BaseWrapper __init__ from a subclass."""
def check_input(self, flag, path):
"""
Turns path into an absolute path and checks that it exists, then
appends it to the args using the given flag (or None).
"""
path = os.path.abspath(path)
if os.path.exists(path):
if flag:
self.args.append(flag)
self.args.append(path)
else:
utils.die("input file for flag '%s' does not exists:\n %s" % (
flag, path))
def add_threading(self, flag):
"""
Indicates that this wrapper should use threading by appending an
argument with the specified `flag` followed by the number of threads
specified in the BioLite configuration file.
"""
threads = min(int(config.get_resource('threads')), self.max_concurrency)
if threads > 1:
self.args.append(flag)
self.args.append(threads)
def add_openmp(self):
"""
Indicates that this wrapper should use OpenMP by setting the
$OMP_NUM_THREADS environment variable equal to the number of threads
specified in the BioLite configuration file.
"""
threads = min(int(config.get_resource('threads')), self.max_concurrency)
self.env['OMP_NUM_THREADS'] = str(threads)
def version(self, flag=None, cmd=None, path=None):
"""
Generates and logs a hash to distinguish this particular installation
of the program (on a certain host, with a certain compiler, program
version, etc.)
Specify the optional 'path' argument if the wrapper name is not
actually the program, e.g. if your program has a Perl wrapper script.
Set 'path' to the binary program that is likely to change between
versions.
Specify the optional 'cmd' argument if the command to run for version
information is different than what will be invoked by `run` (e.g.
if the program has a perl wrapper script, but you want to version an
underlying binary executable).
"""
# Setup the command to run.
if not cmd:
cmd = list(self.cmd)
if flag:
cmd.append(flag)
# Run the command.
try:
vstring = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
vstring = e.output
except OSError as e:
utils.failed_executable(cmd[0], e)
if not path:
path = cmd[0]
# Generate a hash.
vhash = diagnostics.log_program_version(self.name, vstring, path)
if vhash:
diagnostics.prefix.append(self.name)
diagnostics.log('version', vhash)
diagnostics.prefix.pop()
def version_jar(self):
"""
Special case of version() when the executable is a JAR file.
"""
cmd = config.get_command('java')
cmd.append('-jar')
cmd += self.cmd
self.version(cmd=cmd, path=self.cmd[0])
def run(self, cmd=None):
"""
Call this function at the end of your class's `__init__` function.
"""
diagnostics.prefix.append(self.name)
if not cmd:
cmd = self.cmd
stderr = os.path.abspath(self.name + '.log')
self.args.append('2>>'+stderr)
if self.pipe:
self.args += ('|', self.pipe, '2>>'+stderr)
# Write to a stdout file if it was set by the derived class.
# Otherwise, stdout and stderr will be combined into the log file.
if self.stdout:
stdout = os.path.abspath(self.stdout)
self.args.append('1>'+stdout)
diagnostics.log('stdout', stdout)
elif self.stdout_append:
stdout = os.path.abspath(self.stdout_append)
self.args.append('1>>'+stdout)
diagnostics.log('stdout', stdout)
else:
self.args.append('1>>'+stderr)
# Print timestamp to log
open(stderr, 'a').write("[biolite] timestamp=%s\n" % utils.timestamp())
diagnostics.log('log', stderr)
cmd = ' '.join(chain(cmd, map(str, self.args)))
diagnostics.log('command', cmd)
start = time.time()
save_cwd = os.getcwd()
try:
os.chdir(self.cwd)
spawn_pid = os.spawnle(os.P_NOWAIT, self.shell, self.shell, '-c', cmd, self.env)
wait_pid, retcode, rusage = os.wait4(spawn_pid, 0)
if wait_pid != spawn_pid:
utils.die("could not wait for process %d: got %d" % (spawn_pid, wait_pid))
os.chdir(save_cwd)
except OSError as e:
utils.info(e)
utils.die("could not run wrapper for command:\n%s" % cmd)
#utils.failed_executable(exe, e)
elapsed = time.time() - start
retcode = os.WEXITSTATUS(retcode)
if (self.return_ok is not None) and (self.return_ok != retcode):
# Give some context to the non-zero return.
if os.path.isfile(stderr):
subprocess.call(['tail', '-3', stderr])
utils.die("non-zero return (%d) from command:\n%s" % (retcode, cmd))
# Log profile.
diagnostics.prefix.append('profile')
diagnostics.log('name', self.name)
diagnostics.log('return', retcode)
diagnostics.log('walltime', elapsed)
diagnostics.log('usertime', rusage.ru_utime)
diagnostics.log('systime', rusage.ru_stime)
if config.uname == 'Darwin':
diagnostics.log('maxrss', rusage.ru_maxrss / 1024)
else:
diagnostics.log('maxrss', rusage.ru_maxrss)
diagnostics.prefix.pop()
# Reverse any output patterns, since they will be matched against
# program output from the last line backward.
if self.output_patterns:
self.output_patterns.reverse()
diagnostics.log_program_output(stderr, self.output_patterns)
diagnostics.prefix.pop()
def run_jar(self, mem=None):
"""
Special case of run() when the executable is a JAR file.
"""
cmd = config.get_command('java')
if mem:
cmd.append('-Xmx%s' % mem)
cmd.append('-jar')
cmd += self.cmd
self.run(cmd)
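# Illustrative sketch (not part of the original BioLite source): the
# conventional shape of a wrapper subclass, as described in the BaseWrapper
# docstring. 'mytool' and its '--version' flag are hypothetical; substitute
# a real key from the BioLite configuration file.
class ExampleTool (BaseWrapper):
    """
    Minimal wrapper skeleton: init with the config key, log the program
    version, validate the input path, forward extra args, then run.
    """
    def __init__(self, input, *args, **kwargs):
        self.init('mytool', **kwargs)
        self.version('--version')
        self.check_input('-i', input)
        self.args += args
        self.run()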
### BioLite command line tools ###
class Coverage (BaseWrapper):
"""
usage: coverage [-i SAM] [-o STATS]
Parses a SAM alignment file and writes a coverage table to STATS with
columns for the reference name, the length of the reference, and the number
of reads covering it in the alignment.
"""
def __init__(self, input, *args, **kwargs):
self.init('coverage', **kwargs)
self.version('-v')
self.check_input('-i', input)
self.args += args
self.run()
class Exclude (BaseWrapper):
"""
usage: exclude -x EXCLUDE_FILE [-k] [...] [-i INPUT ...] [-o OUTPUT ...]
Filters all the reads in the input files (FASTA or FASTQ is automatically
detected) and excludes those with ids found in any of the EXCLUDE_FILEs.
If multiple input files are specified, these are treated as paired files.
So if a sequence in one input is excluded, its pair is also excluded from
the same position in all other input files.
If the -k flag is specified, invert the selection to keep instead of exclude.
"""
def __init__(self, excludes, inputs, outputs, *args, **kwargs):
self.init('exclude', **kwargs)
self.version('-v')
for x in excludes:
self.check_input('-x', x)
for i in inputs:
self.check_input('-i', i)
for o in outputs:
self.args += ('-o', o)
self.args += args
self.run()
class Fastq2Fasta (BaseWrapper):
"""
usage: fastq2fasta -i FASTQ [...] [-o FASTA ...] [-q QUAL ...] [-a]
[-t OFFSET] [-s SUFFIX]
Converts each FASTQ input file to a FASTA file and quality score file
with the names <basename>.fasta and <basename>.fasta.qual, where <basename>
is the name of INPUT up to the last period (or with the names FASTA and QUAL
if specified).
FASTA and QUAL are *appended* to (not truncated).
"""
def __init__(self, input, *args, **kwargs):
self.init('fastq2fasta', **kwargs)
self.version('-v')
self.check_input('-i', input)
self.args += args
self.run()
class Fasta2Fastq (BaseWrapper):
"""
usage: fasta2fastq -i FASTA [...] -q QUAL [...] [-o FASTQ] [-a] [-t OFFSET]
Merges each FASTA file and its corresponding QUAL file into a FASTQ file
with the name <basename>.fastq, where <basename> in the FASTA name up to the
last period (or with name FASTQ if specified).
FASTQ is *appended* to (not truncated).
"""
def __init__(self, input, qual, *args, **kwargs):
self.init('fasta2fastq', **kwargs)
self.version('-v')
self.check_input('-i', input)
self.check_input('-q', qual)
self.args += args
self.run()
class FilterIllumina (BaseWrapper):
"""
usage: filter_illumina [-i INPUT ...] [-o OUTPUT ...] [-u UNPAIRED-OUTPUT] [-f]
[-t OFFSET] [-q QUALITY] [-n NREADS] [-a] [-b] [-s SEP]
Filters out low-quality and adapter-contaminated reads from Illumina data.
If multiple input files are specified, these are treated as paired files.
So if a sequence in one input is filtered, its pair is also filtered from
the same position in all other input files.
"""
def __init__(self, inputs, outputs, *args, **kwargs):
self.init('filter_illumina', **kwargs)
self.version('-v')
for i in inputs:
self.check_input('-i', i)
for o in outputs:
self.args += ('-o', o)
self.args += args
self.run()
class InsertStats (BaseWrapper):
"""
usage: insert_stats -i SAM -o HIST -m MAX_INSERT
Reads a SAM alignment file and uses it to estimate the mean and std. dev.
of the insert size of the mapped paired-end reads. A histogram of all insert
sizes encountered is written to the HIST file.
"""
def __init__(self, input, *args, **kwargs):
self.init('insert_stats', **kwargs)
self.version('-v')
self.check_input('-i', input)
self.args += ('-m', kwargs.get('max_insert',
config.get_resource('max_insert_size')))
self.args += args
self.run()
class Interleave (BaseWrapper):
"""
usage: interleave -i INPUT [...] [-o OUTPUT] [-s SEP]
Interleaves the records in the input files (FASTA or FASTQ is automatically
detected) and writes them to OUTPUT, or to stdout if no OUTPUT is specified.
"""
def __init__(self, inputs, output, *args, **kwargs):
self.init('interleave', **kwargs)
self.version('-v')
for i in inputs:
self.args += ('-i', i)
self.args += ('-o', output)
self.args += args
self.run()
class Randomize (BaseWrapper):
"""
usage: randomize [-i INPUT] [-o OUTPUT] [-r READ-ORDER] [-w WRITE-ORDER]
Randomizes the order of sequences in each INPUT file and writes these to a
corresponding OUTPUT file. By default, a new random write order is generated
and saved to WRITE-ORDER, if specified. Alternatively, specifying a
READ-ORDER file uses that order instead of a random one.
"""
def __init__(self, input, *args, **kwargs):
self.init('randomize', **kwargs)
self.version('-v')
self.check_input('-i', input)
self.args += args
self.run()
### Third-party command line tools ###
class Abacas (BaseWrapper):
"""
ABACAS: Algorithm Based Automatic Contiguation of Assembled Sequences
http://abacas.sourceforge.net
"""
def __init__(self, contigs, reference, program, *args, **kwargs):
self.init('abacas', **kwargs)
self.args += ('-q', contigs, '-r', reference, '-p', program) + args
self.run()
class BlastN (BaseWrapper):
"""
blastn from NCBI Blast+
http://blast.ncbi.nlm.nih.gov/
"""
def __init__(self, query, db, *args, **kwargs):
self.init('blastn', **kwargs)
self.version('-version')
self.args += ('-query', query, '-db', os.path.abspath(db))
self.add_threading('-num_threads')
self.args += args
self.run()
class BlastP (BaseWrapper):
"""
blastp from NCBI Blast+
http://blast.ncbi.nlm.nih.gov/
"""
def __init__(self, query, db, *args, **kwargs):
self.init('blastp', **kwargs)
self.version('-version')
self.args += ('-query', query, '-db', os.path.abspath(db))
self.add_threading('-num_threads')
self.args += args
self.run()
class BlastX (BaseWrapper):
"""
blastx from NCBI Blast+
http://blast.ncbi.nlm.nih.gov/
"""
def __init__(self, query, db, *args, **kwargs):
self.init('blastx', **kwargs)
self.version('-version')
self.args += ('-query', query, '-db', os.path.abspath(db))
self.add_threading('-num_threads')
self.args += args
self.run()
class Bowtie2 (BaseWrapper):
"""
A wrapper for the bowtie2 short-read aligner.
http://bowtie-bio.sourceforge.net/
For paired inputs, you can specify the maximum insert size (e.g. the
length of the gap between the reads) with the 'max_insert' keyword
argument. If you don't specify one, the diagnostics database will be
searched for a previous run of the 'insert_size' pipeline for an estimate.
"""
def __init__(self, inputs, db, *args, **kwargs):
self.init('bowtie2', **kwargs)
self.version('--version', config.get_command('bowtie2-align'))
if isinstance(inputs, basestring):
self.check_input('-U', inputs)
elif len(inputs) == 1:
self.check_input('-U', inputs[0])
elif len(inputs) == 2:
self.check_input('-1', inputs[0])
self.check_input('-2', inputs[1])
self.args.append('-X')
self.args.append(
kwargs.get('max_insert', diagnostics.lookup_insert_size().max))
else:
utils.die("Bowtie2 wrapper expects either 1 (SE) or 2 (PE) inputs")
self.args += ('-x', db)
self.add_threading('-p')
self.args += args
self.output_patterns = map(diagnostics.OutputPattern._make, [
(r"(\d+) reads; of these:$",0,"nreads"),
(r" (\d+) \S+ were paired; of these:$",0,"npairs"),
(r" (\d+) \S+ aligned concordantly 0 times$",0,"nconcord0"),
(r" (\d+) \S+ aligned concordantly exactly 1 time$",0,"nconcord1"),
(r" (\d+) \S+ aligned concordantly >1 times$",0,"nconcord2"),
(r" (\d+) \S+ aligned discordantly 1 time$",0,"ndiscord1"),
(r" (\d+) mates make up the pairs; of these:$",0,"nunpaired"),
(r" (\d+) \S+ aligned 0 times$",0,"nunpaired0"),
(r" (\d+) \S+ aligned exactly 1 time$",0,"nunpaired1"),
(r" (\d+) \S+ aligned >1 times$",0,"nunpaired2")])
self.run()
class Bowtie2Build (BaseWrapper):
"""
A wrapper for bowtie2-build component of Bowtie2.
http://bowtie-bio.sourceforge.net/
"""
def __init__(self, input, db, *args, **kwargs):
self.init('bowtie2-build', **kwargs)
self.version('--version')
self.check_input(None, input)
self.args.append(db)
self.args += args
self.run()
class Chrysalis (BaseWrapper):
"""
The Chrysalis component of the Trinity RNA-seq assembler:
http://trinityrnaseq.sourceforge.net
"""
def __init__(self, input, iworm, *args, **kwargs):
self.init('chrysalis', **kwargs)
self.add_threading('-cpu')
self.add_openmp()
self.check_input('-i', input)
self.check_input('-iworm', iworm)
mem = utils.mem_to_mb(config.get_resource('memory'))
self.args += ('-sort_buffer_size', '%dM' % int(0.8 * mem))
self.args += args
self.run()
class Dustmasker (BaseWrapper):
"""
dustmasker from NCBI Blast+
http://blast.ncbi.nlm.nih.gov/
"""
def __init__(self, input, *args, **kwargs):
self.init('dustmasker', **kwargs)
self.version('-version-full')
self.check_input('-in', input)
self.args += args
self.run()
class FastQC (BaseWrapper):
"""
A wrapper for FastQC.
http://www.bioinformatics.bbsrc.ac.uk/projects/fastqc/
"""
def __init__(self, input, *args, **kwargs):
self.init('fastqc', **kwargs)
self.version('-v')
self.add_threading('-t')
self.args += args
self.check_input(None, input)
self.run()
class Gblocks (BaseWrapper):
"""
Selection of conserved block from multiple sequence alignments for
phylogenetics
http://molevol.cmima.csic.es/castresana/Gblocks.html
"""
def __init__(self, input, *args, **kwargs):
self.init('Gblocks', **kwargs)
# Ignore Gblocks broken exit code
self.return_ok = None
self.check_input(None, input)
self.args += args
self.run()
class Inchworm (BaseWrapper):
"""
The inchworm component of the Trinity RNA-seq assembler:
http://trinityrnaseq.sourceforge.net
"""
def __init__(self, mode, input, *args, **kwargs):
self.init('inchworm', **kwargs)
self.check_input(mode, input)
self.add_openmp()
self.args.append('--run_inchworm')
self.args += args
self.run()
class JellyfishCount (BaseWrapper):
"""
"""
def __init__(self, input, kmer, *args, **kwargs):
self.init('jellyfish', **kwargs)
self.version('--version')
#
# From the Jellyfish manual (section 2.3), hash size in bytes is:
#
# 2^l * (2k-l+r+1)/8
#
# s = 2^l is the hash size parameter given by -s. By default, r=5 and
# letting l be 0:
#
# s ~= 8 * mem / (2k + 6)
#
mem = 1048576 * utils.mem_to_mb(config.get_resource('memory'))
mem = 2 ** int(math.log(8 * mem / (2*kmer + 6) , 2))
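# Worked example (illustrative, not from the source): with 4 GiB of memory
# and k = 25, 8 * mem / (2k + 6) ~= 6.1e8, so s = 2**29 = 536870912.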
self.args += ('count', '-m', kmer, '-s', mem)
self.add_threading('-t')
self.check_input(None, input)
self.args += args
self.run()
class JellyfishDump (BaseWrapper):
"""
"""
def __init__(self, input, *args, **kwargs):
self.init('jellyfish', **kwargs)
self.version('--version')
self.args.append('dump')
self.check_input(None, input)
self.args += args
self.run()
class MultiBlast (BaseWrapper):
"""
usage: multiblast BLAST THREADS QUERY_LIST OUT [ARGS]
Runs a Blast PROGRAM (e.g. blastx, blastn, blastp) in parallel on a list of
queries (in QUERY_LIST). Additional arguments to PROGRAM can be appended as
ARGS.
The PROGRAM is called on each query with threading equal to THREADS.
Recommendation: set THREADS to the number of cores divided by the number of
query files.
The individual XML outputs for each query file are concatenated into a single
output file OUT.
Example usage:
multiblast blastn 4 "query1.fa query2.fa" all-queries.xml -e 1e-6
"""
def __init__(self, blast, threads, qlist, db, out, evalue=0.0001, targets=20):
if not glob.glob(db + '.*'):
utils.die("missing blast database '%s'" % db)
self.init(blast)
self.version('-version')
self.args += (
threads, ' '.join(qlist), out,
'-db', os.path.abspath(db), '-evalue', evalue,
'-max_target_seqs', targets)
self.run(config.get_command('multiblast') + self.cmd)
class Macse (BaseWrapper):
"""
Multiple alignment of coding sequences.
"""
def __init__(self, input, output, *args, **kwargs):
self.init('macse')
self.version_jar()
self.check_input('-i', input)
self.args += ('-o', output)
self.args += args
mem = 0.9 * utils.mem_to_mb(config.get_resource('memory'))
self.run_jar('%dM' % mem)
class MakeBlastDB (BaseWrapper):
"""
makeblastdb from NCBI Blast+
http://blast.ncbi.nlm.nih.gov/
"""
def __init__(self, input, db, dbtype, *args, **kwargs):
self.init('makeblastdb', **kwargs)
self.version('-version')
self.check_input('-in', input)
self.args += ('-dbtype', dbtype, '-out', os.path.abspath(db))
self.args += args
self.run()
class Mcl (BaseWrapper):
"""
Markov Clustering Algorithm (MCL) for analysis of networks
http://micans.org/mcl/
"""
def __init__(self, input, *args, **kwargs):
self.init('mcl', **kwargs)
self.version('--version')
self.check_input(None, input)
self.add_threading('-te')
self.args += args
self.run()
class Minimo (BaseWrapper):
"""
Minimo: overlap graph assembler for small data sets
From the AMOS assembler package.
http://amos.sourceforge.net
"""
def __init__(self, fasta, *args, **kwargs):
self.init('Minimo', **kwargs)
self.args.append(fasta)
self.args += args
self.run()
class Oases (BaseWrapper):
"""
Oases, a *de novo* transcriptome assembler
http://www.ebi.ac.uk/~zerbino/oases/
"""
def __init__(self, workdir, *args, **kwargs):
self.init('oases')
self.version()
self.args.append(workdir)
self.args += args
self.run()
class Oma (BaseWrapper):
"""
"""
def __init__(self, **kwargs):
self.init('oma', **kwargs)
parameters = """
ReuseCachedResults := true;
NP := %d;
MinScore := 181;
LengthTol := 0.61;
StablePairTol := 1.81;
VerifiedPairTol := 1.53;
MinSeqLen := 50;
StableIdsForGroups := false;
DoHierarchicalGroups := true;
MaxTimePerLevel := 1200;
SpeciesTree := 'estimate';
ReachabilityCutoff := 0.65;
UseEsprit := false;
DistConfLevel := 2;
MinProbContig := 0.4;
MaxContigOverlap := 5;
MinSeqLenContig := 20;
MinBestScore := 250;
""" % int(config.get_resource('threads'))
open(os.path.join(self.cwd, 'parameters.drw'), 'w').write(parameters)
self.run()
class PartitionChrysalis (BaseWrapper):
"""
The partitioning script for the Chrysalis component of the Trinity RNA-seq
assembler:
http://trinityrnaseq.sourceforge.net
"""
def __init__(self, debruijn, reads, *args, **kwargs):
self.init('partition_chrysalis', **kwargs)
self.check_input('--deBruijns', debruijn)
self.check_input('--componentReads', reads)
self.args += args
self.run()
class Parallel (BaseWrapper):
"""
GNU parallel utility
http://www.gnu.org/software/parallel/
"""
def __init__(self, commands, *args, **kwargs):
self.init('parallel', **kwargs)
self.version('--version')
self.args += (
'--gnu', '-a', commands, '-j',
kwargs.get('threads', config.get_resource('threads')))
hostlist = config.get_resource_default('hostlist', None)
if hostlist:
self.args += ('-S', hostlist)
if self.cwd:
self.args += ('--wd', self.cwd)
else:
self.args += ('--wd', os.getcwd())
self.args += args
self.run()
class Raxml (BaseWrapper):
"""
Maximum Likelihood based inference of phylogenetic trees.
"""
def __init__(self, input, *args, **kwargs):
self.init('raxml', **kwargs)
self.version('-v')
self.check_input('-s', input)
self.add_threading('-T')
self.args += args
self.run()
class RaxmlMpi (BaseWrapper):
"""
Maximum Likelihood based inference of phylogenetic trees
(MPI version).
"""
def __init__(self, mpirun, input, *args, **kwargs):
self.init('raxml-mpi', **kwargs)
self.cmd.insert(0, mpirun)
self.version('-v')
self.check_input('-s', input)
self.args += args
self.run()
class RaxmlHybrid (BaseWrapper):
"""
Maximum Likelihood based inference of phylogenetic trees
(MPI-hybrid version).
"""
def __init__(self, mpirun, input, *args, **kwargs):
self.init('raxml-hybrid', **kwargs)
self.cmd.insert(0, mpirun)
self.version('-v')
self.check_input('-s', input)
self.add_threading('-T')
self.args += args
self.run()
class RpsBlast (BaseWrapper):
"""
rpsblast from NCBI Blast+
http://blast.ncbi.nlm.nih.gov/
"""
def __init__(self, query, db, *args, **kwargs):
self.init('rpsblast', **kwargs)
self.version('-version')
self.args += ('-query', query, '-db', os.path.abspath(db))
self.add_threading('-num_threads')
self.args += args
self.run()
class RsemReference (BaseWrapper):
"""
http://deweylab.biostat.wisc.edu/rsem/
"""
def __init__(self, input, prefix, *args, **kwargs):
self.init('rsem-prepare-reference', **kwargs)
self.args += args
self.args += (input, prefix)
self.run()
class RsemExpression (BaseWrapper):
"""
http://deweylab.biostat.wisc.edu/rsem/
"""
def __init__(self, inputs, prefix, name, *args, **kwargs):
self.init('rsem-calculate-expression', **kwargs)
self.add_threading('--num-threads')
max_insert = kwargs.get('max_insert')
if len(inputs) == 2:
if max_insert is None:
max_insert = diagnostics.lookup_insert_size().max
self.args += ('--paired-end', '--fragment-length-max', int(max_insert))
self.args += args
for input in inputs:
if input.endswith('.gz'):
self.shell = '/bin/bash'
self.args.append('<(gzip -dc %s)' % input)
else:
self.args.append(input)
self.args += (prefix, name)
self.output_patterns = map(diagnostics.OutputPattern._make, [
(r"# reads processed: (\d+)$",0,"nreads"),
(r"# reads with at least one reported alignment: (\d+) \S+$",0,"naligned"),
(r"# reads that failed to align: (\d+) \S+$",0,"nfailed")])
self.run()
class SamTools (BaseWrapper):
def __init__(self, input, *args, **kwargs):
self.init('samtools', **kwargs)
self.version()
self.args += args
self.check_input(None, input)
self.run()
class SamView (BaseWrapper):
def __init__(self, input_path, regions, output_path):
self.init('samtools')
self.version()
self.args += ('view', '-o', output_path, input_path)
self.args += regions
self.run()
class SamToolsSort (BaseWrapper):
def __init__(self, input, prefix, *args, **kwargs):
self.init('samtools', **kwargs)
self.version()
self.args.append('sort')
self.args += args
self.check_input(None, input)
self.args.append(prefix)
self.run()
class SamIndex (BaseWrapper):
def __init__(self, input_path):
self.init('samtools')
self.version()
self.args += ('index', input_path)
self.run()
class SamPileup (BaseWrapper):
def __init__(self, reference_path, bam_path, output_path):
self.init('samtools')
self.version()
self.args += (
'mpileup', '-BQ0', '-d1000000000', '-f', reference_path, bam_path)
self.stdout = output_path
self.run()
class Spades (BaseWrapper):
"""
SPAdes de novo assembler
http://bioinf.spbau.ru/spades
"""
def __init__(self, inputs, *args, **kwargs):
self.init("spades.py", **kwargs)
self.name = "spades"
self.version()
self.add_threading("-t")
mem = 0.9 * utils.mem_to_mb(config.get_resource('memory')) / 1024
self.args += ("-m", max(1, int(mem)))
# Detect inputs
if isinstance(inputs, basestring):
self.check_input('-s', inputs)
elif len(inputs) == 1:
self.check_input('-s', inputs[0])
elif len(inputs) == 2:
self.check_input('-1', inputs[0])
self.check_input('-2', inputs[1])
else:
utils.die("expected either 1 (SE) or 2 (PE) inputs")
self.args += args
self.run()
class Sqlite3 (BaseWrapper):
def __init__(self, dbpath, sql, *args, **kwargs):
self.init('sqlite3', **kwargs)
self.version('-version')
self.args += args
self.check_input(None, dbpath)
self.args.append('"%s"' % sql.replace('"', '\"'))
self.run()
class TBlastX (BaseWrapper):
"""
tblastx from NCBI Blast+
http://blast.ncbi.nlm.nih.gov/
"""
def __init__(self, query, db, *args, **kwargs):
self.init('tblastx', **kwargs)
self.version('-version')
self.args += ('-query', query, '-db', os.path.abspath(db))
self.add_threading('-num_threads')
self.args += args
self.run()
class Transdecoder (BaseWrapper):
"""
Identification of candidate coding sequences
http://transdecoder.sourceforge.net
"""
def __init__(self, input, **kwargs):
self.init('transdecoder', **kwargs)
self.check_input('-t', input)
self.run()
class Trinity (BaseWrapper):
"""
Trinity RNA-seq assembler
http://trinityrnaseq.sourceforge.net
"""
def __init__(self, inputs, *args, **kwargs):
self.init('trinity', **kwargs)
self.version('--version')
# Detect inputs
if isinstance(inputs, basestring):
self.check_input('--single', inputs)
elif len(inputs) == 1:
self.check_input('--single', inputs[0])
elif len(inputs) == 2:
self.check_input('--left', inputs[0])
self.check_input('--right', inputs[1])
else:
utils.die("expected either 1 (SE) or 2 (PE) inputs")
# Detect file type
ext = os.path.splitext(self.args[-1])[1]
if ext == '.fa':
self.args += ('--seqType', 'fa')
elif ext == '.fq':
self.args += ('--seqType', 'fq')
else:
utils.info("warning: could not determine sequence type of inputs")
# Java uses roughly 2 CPUs per Butterfly call with GC etc. so reduce
# the number of threads by half.
#threads = kwargs.get('threads', int(config.get_resource('threads')))
#self.max_concurrency = max(1, threads/2)
mem = utils.mem_to_mb(config.get_resource('memory'))
self.args += ("--JM", "%dG" % max(1, int(0.8*mem/1024)))
max_insert = kwargs.get(
'max_insert',
diagnostics.lookup_insert_size().max)
if max_insert:
self.args += ('--group_pairs_distance', int(max_insert))
self.add_threading('--CPU')
self.args += args
self.run()
class VelvetH (BaseWrapper):
"""
velveth component of the Velvet *de novo* assembler
http://www.ebi.ac.uk/~zerbino/velvet/
"""
def __init__(self, outdir, kmer, *args, **kwargs):
self.init('velveth', **kwargs)
self.version()
self.max_concurrency = 16
self.add_openmp()
self.args += (outdir, kmer) + args
self.run()
class VelvetG (BaseWrapper):
"""
velvetg component of the Velvet *de novo* assembler
http://www.ebi.ac.uk/~zerbino/velvet/
"""
def __init__(self, outdir, *args, **kwargs):
self.init('velvetg', **kwargs)
self.version()
self.max_concurrency = 16
self.add_openmp()
self.args.append(outdir)
self.args += args
self.run()
class VelvetOptimiser (BaseWrapper):
"""
Perl script for automatically optimising the three primary parameters
of the Velvet assembler
http://bioinformatics.net.au/software.velvetoptimiser.shtml
"""
def __init__(self, velveth, *args, **kwargs):
self.init('VelvetOptimiser.pl', **kwargs)
self.args.append('-f "%s"' % velveth)
self.args += args
self.run()
class Yasra (BaseWrapper):
"""
YASRA: comparative assembly of short reads using a reference genome
http://www.bx.psu.edu/miller_lab
"""
def __init__(self, *args, **kwargs):
self.init('make', **kwargs)
self.name = 'yasra'
if not os.path.exists(os.path.join(self.cwd, 'Makefile')):
utils.die("couldn't find YASRA Makefile in dir '%s'" % self.cwd)
self.args += args
self.run()
class YasraMakefile (BaseWrapper):
"""
Utility script for generating a Makefile for a YASRA run
"""
def __init__(self, reads, template, *args, **kwargs):
self.init('yasra_makefile', **kwargs)
self.args += (reads, template)
self.args += args
self.run()
# vim: noexpandtab ts=4 sw=4
|
cypridina/gloTK
|
gloTK/wrappers_biolite.py
|
Python
|
gpl-3.0
| 32,239
|
[
"BLAST",
"Bowtie"
] |
a3b42fc286bb7ce97005f7a97dd222334ed73f88b01ad5d0400e838746ea9faa
|
"""AMBER force-field parameters"""
atoms_per_residue = {
'GLH': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2', 'HE2', 'C', 'O'],
'ILE': ['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG2', 'HG21', 'HG22', 'HG23', 'CG1', 'HG12', 'HG13', 'CD1', 'HD11',
'HD12',
'HD13', 'C', 'O'],
'DTN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'C7', 'H71',
'H72', 'H73', 'C4', 'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'", 'H3T'],
'GLN': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22', 'C', 'O'],
'DG': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2",
"O3'"],
'DA3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'",
'H3T'],
'DC': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'H5', 'C4', 'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"],
'DA': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"],
'GLY': ['N', 'H', 'CA', 'HA2', 'HA3', 'C', 'O'],
'RCN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4',
'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'", 'H3T'],
'HIP': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CE1', 'HE1', 'NE2', 'HE2', 'CD2', 'HD2', 'C',
'O'],
'TYR': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CE1', 'HE1', 'CZ', 'OH', 'HH', 'CE2', 'HE2',
'CD2', 'HD2', 'C', 'O'],
'RU3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'H5', 'C4', 'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'", 'H3T'],
'DT': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'C7', 'H71', 'H72', 'H73', 'C4', 'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"],
'ALA': ['N', 'H', 'CA', 'HA', 'CB', 'HB1', 'HB2', 'HB3', 'C', 'O'],
'GLU': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2', 'C', 'O'],
'RGN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'",
'H3T'],
'RU5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4',
'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'DCN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4',
'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'", 'H3T'],
'RU': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'H5', 'C4', 'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'ASP': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'OD2', 'C', 'O'],
'SER': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'OG', 'HG', 'C', 'O'],
'LYS': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ',
'HZ1', 'HZ2', 'HZ3', 'C', 'O'],
'RAN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'", 'H3T'],
'DAN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'", 'H3T'],
'CYX': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'SG', 'C', 'O'],
'DGN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'", 'H3T'],
'RG': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'",
"HO'2",
"O3'"],
'HID': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CE1', 'HE1', 'NE2', 'CD2', 'HD2', 'C', 'O'],
'RA': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2",
"O3'"],
'RC': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'H5', 'C4', 'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'LYN': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ',
'HZ2', 'HZ3', 'C', 'O'],
'ASH': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'OD2', 'HD2', 'C', 'O'],
'ASN': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'ND2', 'HD21', 'HD22', 'C', 'O'],
'CYM': ['N', 'HN', 'CA', 'HA', 'CB', 'HB3', 'HB2', 'SG', 'C', 'O'],
'HIE': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'CE1', 'HE1', 'NE2', 'HE2', 'CD2', 'HD2', 'C', 'O'],
'CYS': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'SG', 'HG', 'C', 'O'],
'VAL': ['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG1', 'HG11', 'HG12', 'HG13', 'CG2', 'HG21', 'HG22', 'HG23', 'C', 'O'],
'THR': ['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG2', 'HG21', 'HG22', 'HG23', 'OG1', 'HG1', 'C', 'O'],
'DG3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2",
"O3'",
'H3T'],
'RA5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'RA3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2",
"O3'", 'H3T'],
'DG5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"],
'TRP': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'NE1', 'HE1', 'CE2', 'CZ2', 'HZ2', 'CH2',
'HH2',
'CZ3', 'HZ3', 'CE3', 'HE3', 'CD2', 'C', 'O'],
'DC5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4',
'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"],
'DC3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'H5', 'C4', 'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'", 'H3T'],
'RG3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7',
'C5', 'C6', 'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'",
"HO'2",
"O3'", 'H3T'],
'RUN': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4',
'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'", 'H3T'],
'RG5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'O6', 'N1', 'H1', 'C2', 'N2', 'H21', 'H22', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'DA5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N9', 'C8', 'H8', 'N7', 'C5', 'C6',
'N6', 'H61', 'H62', 'N1', 'C2', 'H2', 'N3', 'C4', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"],
'RC5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'H5', 'C4',
'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'"],
'PHE': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CE1', 'HE1', 'CZ', 'HZ', 'CE2', 'HE2', 'CD2',
'HD2', 'C', 'O'],
'RC3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'H5', 'C4', 'N4', 'H41', 'H42', 'N3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "O2'", "HO'2", "O3'", 'H3T'],
'MET': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'SD', 'CE', 'HE1', 'HE2', 'HE3', 'C', 'O'],
'LEU': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG', 'CD1', 'HD11', 'HD12', 'HD13', 'CD2', 'HD21', 'HD22',
'HD23', 'C', 'O'],
'ARG': ['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1',
'HH11', 'HH12', 'NH2', 'HH21', 'HH22', 'C', 'O'],
'DT3': ['P', 'O1P', 'O2P', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5',
'C7', 'H71', 'H72', 'H73', 'C4', 'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'",
         'H3T'],
 'PRO': ['N', 'CD', 'HD2', 'HD3', 'CG', 'HG2', 'HG3', 'CB', 'HB2', 'HB3', 'CA', 'HA', 'C', 'O'],
'DT5': ['H5T', "O5'", "C5'", "H5'1", "H5'2", "C4'", "H4'", "O4'", "C1'", "H1'", 'N1', 'C6', 'H6', 'C5', 'C7', 'H71',
'H72', 'H73', 'C4', 'O4', 'N3', 'H3', 'C2', 'O2', "C3'", "H3'", "C2'", "H2'1", "H2'2", "O3'"]}
charges = {'DC3-H3T': 0.4396, "RGN-O2'": -0.6139, 'RAN-H5T': 0.4295, "RU-C4'": 0.1065, "DT-C3'": 0.0713,
"RAN-C2'": 0.067,
"RU-O4'": -0.3548, "DTN-H2'1": 0.0718, "DTN-H2'2": 0.0718, 'LYS-HB2': 0.0362, "DG3-H2'1": 0.0718,
"DG3-H2'2": 0.0718,
'ILE-HG23': 0.0882, 'HID-NE2': -0.5727, 'GLU-CD': 0.8054, 'GLU-CG': 0.0136, 'GLU-CA': 0.0397,
'GLU-CB': 0.056,
"DC3-H3'": 0.0985, 'DT-H3': 0.342, 'DG-O1P': -0.7761, "DA5-H1'": 0.1838, "RC5-O2'": -0.6139,
'DCN-C2': 0.7959,
'DCN-C6': -0.0183, 'DCN-C5': -0.5222, 'DCN-C4': 0.8439, 'CYX-CB': -0.079, 'HIE-ND1': -0.5432,
'CYX-CA': 0.0429,
'RU3-C2': 0.4687, "RCN-O4'": -0.3548, 'SER-HB2': 0.0352, 'SER-HB3': 0.0352, 'DC5-N4': -0.9773,
'DC5-N1': -0.0339,
'DC5-N3': -0.7748, 'RC5-H5T': 0.4295, "DCN-C1'": -0.0116, 'GLU-HG2': -0.0425, "RA5-C5'": 0.0558,
'RGN-N7': -0.5709,
'RGN-N2': -0.9672, 'RGN-N3': -0.6323, 'RGN-N1': -0.4787, 'RGN-N9': 0.0492, 'RUN-O2': -0.5477,
'DT5-H6': 0.2607,
'DT5-H3': 0.342, "RG5-O3'": -0.5246, 'RA3-O1P': -0.776, "DTN-C2'": -0.0854, 'DTN-H5T': 0.4422,
'TYR-HE2': 0.1656,
'TYR-HE1': 0.1656, 'RCN-C5': -0.5215, 'RCN-C4': 0.8185, 'RCN-C6': 0.0053, 'RCN-C2': 0.7538,
'CYM-HB3': 0.1122,
'CYM-HB2': 0.1122, "RA3-HO'2": 0.4186, "DC3-C1'": -0.0116, "DT5-H2'1": 0.0718, "DT5-H2'2": 0.0718,
'RU3-O4': -0.5761,
"DT3-C2'": -0.0854, 'RU3-O2': -0.5477, 'VAL-HA': 0.0969, 'VAL-HB': -0.0297, 'PRO-HA': 0.0641,
'DG5-H5T': 0.4422,
"RG5-C5'": 0.0558, "DA-O5'": -0.4954, "RG3-C1'": 0.0191, "RU-HO'2": 0.4186, 'DG5-N1': -0.5053,
'DG5-N3': -0.6636,
'DG5-N2': -0.923, 'DG5-N7': -0.5725, 'DG5-N9': 0.0577, "RUN-C1'": 0.0674, "RA5-O5'": -0.6223,
"RCN-C4'": 0.1065,
'DA5-N9': -0.0268, 'DG3-C6': 0.4918, 'DA5-N3': -0.7417, 'DA5-N1': -0.7624, 'DA5-N7': -0.6175,
'DA5-N6': -0.9123,
'RG-P': 1.1662, 'ILE-HG21': 0.0882, 'ILE-HG22': 0.0882, "RC3-H5'1": 0.0679, "RC3-H5'2": 0.0679,
'DA-H2': 0.0598,
'DA-H8': 0.1877, 'RAN-H61': 0.4115, 'RAN-H62': 0.4115, 'DAN-N3': -0.7417, 'DAN-N1': -0.7624,
'DAN-N6': -0.9123,
'DAN-N7': -0.6175, "RC3-H1'": 0.2029, 'LYS-HE2': 0.1135, 'RU-C2': 0.4687, 'DAN-N9': -0.0268, 'RU-C4': 0.5952,
'RU-C5': -0.3635, 'RU-C6': -0.1126, 'RC5-H42': 0.4234, 'RC5-H41': 0.4234, "DA-C5'": -0.0069,
"RC5-O3'": -0.5246,
"RC5-H4'": 0.1174, "RA-H1'": 0.2007, "RU3-O4'": -0.3548, 'MET-HE1': 0.0684, "DG-H1'": 0.1746,
'MET-HE2': 0.0684,
"RCN-O3'": -0.6541, 'ARG-NE': -0.5295, 'DT-H6': 0.2607, 'DCN-H42': 0.4314, "DA5-H2'2": 0.0718,
"DC-C1'": -0.0116,
"RAN-H2'1": 0.0972, 'RGN-O6': -0.5597, "RU3-H5'2": 0.0679, "RU3-H5'1": 0.0679, "DT3-C1'": 0.068,
'RA-N7': -0.6073,
'DCN-N1': -0.0339, 'DCN-N3': -0.7748, 'DCN-N4': -0.9773, 'RA-N1': -0.7615, "DTN-C3'": 0.0713,
'RA-N9': -0.0251,
'LEU-CG': 0.3531, 'LEU-CA': -0.0518, 'LEU-CB': -0.1102, 'HIP-HB3': 0.081, 'HIP-HB2': 0.081,
"DTN-H4'": 0.1176,
'RG3-O6': -0.5597, 'ASN-ND2': -0.9191, "DT-H4'": 0.1176, "DC5-H1'": 0.1963, "DG5-H4'": 0.1176,
'RU3-N3': -0.3549,
'RU3-N1': 0.0418, "DGN-C3'": 0.0713, "DA-O4'": -0.3691, "RG3-C2'": 0.067, 'RU3-P': 1.1662,
'THR-HG23': 0.0642,
'THR-HG21': 0.0642, 'LYS-C': 0.7341, 'LYS-N': -0.3479, 'LYS-O': -0.5894, 'LYS-H': 0.2747, 'DG3-O1P': -0.7761,
"DAN-H1'": 0.1838, 'DA5-C2': 0.5716, 'DA5-C4': 0.38, 'DA5-C5': 0.0725, 'DA5-C6': 0.6897, 'DA5-C8': 0.1607,
'RC3-O1P': -0.776, 'ASP-HB2': -0.0122, 'ASP-HB3': -0.0122, 'RC-N3': -0.7584, "DC3-H1'": 0.1963,
'RG5-H5T': 0.4295,
'LYS-HD2': 0.0621, 'LYS-HD3': 0.0621, 'DG5-C6': 0.4918, 'DG5-C4': 0.1814, 'CYM-CB': -0.2413,
'DG5-C2': 0.7432,
'DG5-C8': 0.0736, 'DT5-C5': 0.0025, 'DTN-C4': 0.5194, 'DTN-C5': 0.0025, 'DTN-C6': -0.2209, 'DT5-C6': -0.2209,
'DTN-C2': 0.5677, "RU-H5'2": 0.0679, "RU-H5'1": 0.0679, 'DG-H21': 0.4235, 'DG-H22': 0.4235,
"DA3-H1'": 0.1838,
'ILE-HD12': 0.0186, 'ILE-HD13': 0.0186, 'ILE-HD11': 0.0186, "RU5-H1'": 0.1824, "DG-C3'": 0.0713,
'RA5-N9': -0.0251,
"RU3-O5'": -0.4989, 'RA5-N7': -0.6073, 'RA5-N6': -0.9019, 'RA5-N1': -0.7615, 'RA5-N3': -0.6997,
"RCN-O2'": -0.6139,
"RC-H5'1": 0.0679, "RC-H5'2": 0.0679, 'HIE-CD2': -0.2207, 'HIP-CB': -0.0414, 'HIP-CA': -0.1354,
'HIP-CG': -0.0012,
'RU-N3': -0.3549, 'RU-N1': 0.0418, "DG-O3'": -0.5232, "DCN-C3'": 0.0713, 'DA-P': 1.1659, 'DTN-H3T': 0.4396,
"DG3-C4'": 0.1629, "RU-H3'": 0.0615, 'ARG-CZ': 0.8076, 'ARG-CG': 0.039, "DA-H3'": 0.0985, 'ARG-CB': -0.0007,
'ARG-CA': -0.2637, 'GLU-HG3': -0.0425, 'DCN-O2': -0.6548, "DCN-H4'": 0.1176, "DA5-H2'1": 0.0718,
'DCN-H41': 0.4314,
'TRP-HZ3': 0.1447, "DTN-H3'": 0.0985, "DG5-H5'1": 0.0754, "DG5-H5'2": 0.0754, 'RG3-N9': 0.0492,
'LYN-HZ3': 0.38604,
'LYN-HZ2': 0.38604, 'RG3-N7': -0.5709, 'RG3-N1': -0.4787, 'RG3-N3': -0.6323, 'RG3-N2': -0.9672,
"DC3-H2'1": 0.0718,
"DC3-H2'2": 0.0718, 'TRP-CD2': 0.1243, 'DT-H71': 0.077, 'DT-H72': 0.077, 'DT-H73': 0.077, "DA3-O3'": -0.6549,
'RGN-H21': 0.4364, 'ASP-HA': 0.088, 'ARG-HB2': 0.0327, 'ARG-HB3': 0.0327, "RU3-HO'2": 0.4186,
"DC5-O3'": -0.5232,
'DT3-H72': 0.077, 'DT3-H73': 0.077, "RCN-HO'2": 0.4186, 'DT3-H71': 0.077, 'DG3-O6': -0.5699,
"DGN-C2'": -0.0854,
'TRP-HH2': 0.1417, "RG3-C3'": 0.2022, "RC-C5'": 0.0558, "RUN-C3'": 0.2022, 'VAL-HG23': 0.0791,
'VAL-HG22': 0.0791,
'VAL-HG21': 0.0791, 'RG-C8': 0.1374, 'RG-C6': 0.477, 'RG-C4': 0.1222, 'RG-C5': 0.1744, 'RG-C2': 0.7657,
'GLH-H': 0.2719, 'GLH-O': -0.5679, 'GLH-N': -0.4157, 'GLH-C': 0.5973, 'RUN-O4': -0.5761, "RU-C1'": 0.0674,
"RU3-C4'": 0.1065, 'GLN-NE2': -0.9407, 'LYS-HG3': 0.0103, 'LYS-HG2': 0.0103, 'HIE-HE1': 0.1435,
'HIE-HE2': 0.3339,
'GLN-HA': 0.085, 'ILE-HG13': 0.0236, 'ILE-HG12': 0.0236, "RC3-H3'": 0.0615, "RC3-O2'": -0.6139,
'LYS-HZ1': 0.34,
'GLN-HB3': 0.0171, 'GLN-HB2': 0.0171, 'DA5-H61': 0.4167, "DAN-C5'": -0.0069, "DG-H3'": 0.0985,
'HIE-CG': 0.1868,
'HIE-CA': -0.0581, 'HIE-CB': -0.0074, 'DA3-O1P': -0.7761, 'RC3-O2P': -0.776, "RA-H3'": 0.0615,
'LEU-HD11': 0.1,
'LEU-HD12': 0.1, 'LEU-HD13': 0.1, 'RC3-H3T': 0.4376, "RG3-H2'1": 0.0972, "RU3-H3'": 0.0615,
"DAN-H2'2": 0.0718,
"RG3-HO'2": 0.4186, 'HIE-CE1': 0.1635, 'TYR-HB2': 0.0295, 'DC-H5': 0.1863, 'DC-H6': 0.2293,
"RCN-H5'2": 0.0679,
"RCN-H5'1": 0.0679, 'RU-O4': -0.5761, "DCN-C2'": -0.0854, 'RU-O2': -0.5477, "DTN-C1'": 0.068,
'CYM-O': -0.5679,
'CYM-N': -0.4157, 'DG5-C5': 0.1991, 'CYM-C': 0.5973, 'DG-O2P': -0.7761, 'RAN-H2': 0.0473, 'GLY-C': 0.5973,
'GLY-N': -0.4157, 'GLY-O': -0.5679, 'GLY-H': 0.2719, 'ASH-CB': -0.0316, 'ASH-CA': 0.0341, 'ASH-CG': 0.6462,
"DG3-C5'": -0.0069, 'DCN-H6': 0.2293, 'DCN-H5': 0.1863, "RG-O5'": -0.4989, 'ASH-HB2': 0.0488,
'ASH-HB3': 0.0488,
"RG-C5'": 0.0558, "RG3-H4'": 0.1174, 'THR-CA': -0.0389, 'THR-CB': 0.3654, "RCN-H4'": 0.1174,
'ASH-HA': 0.0864,
'RCN-H41': 0.4234, "DC3-C4'": 0.1629, 'RCN-H42': 0.4234, 'DCN-H5T': 0.4422, "DC-H2'1": 0.0718,
"DC-H2'2": 0.0718,
"RC-C4'": 0.1065, "RA5-HO'2": 0.4186, 'CYM-SG': -0.8844, "DC5-O4'": -0.3691, 'VAL-C': 0.5973,
"DG-C5'": -0.0069,
'DG3-N9': 0.0577, 'DG3-N3': -0.6636, 'DG3-N2': -0.923, 'DG3-N1': -0.5053, 'VAL-H': 0.2719, 'DG3-N7': -0.5725,
'VAL-N': -0.4157, "DGN-C5'": -0.0069, 'DT3-O1P': -0.7761, "DG5-C1'": 0.0358, "RUN-C2'": 0.067,
'GLU-C': 0.5366,
'DT-O1P': -0.7761, 'GLU-H': 0.2936, 'GLU-N': -0.5163, 'GLU-O': -0.5819, "RU3-C5'": 0.0558,
'DA3-O2P': -0.7761,
'DGN-H8': 0.1997, 'DGN-H1': 0.352, "DAN-O4'": -0.3691, "RA3-H5'2": 0.0679, "RA3-H5'1": 0.0679,
'DT-C2': 0.5677,
'CYX-HB2': 0.091, 'CYX-HB3': 0.091, "RAN-H1'": 0.2007, "RC3-O3'": -0.6541, "RA3-C5'": 0.0558,
'VAL-HG12': 0.0791,
'VAL-HG13': 0.0791, "RU5-O2'": -0.6139, 'VAL-HG11': 0.0791, "DAN-C4'": 0.1629, "DGN-O5'": -0.6318,
"DA-C2'": -0.0854,
'RC5-O2': -0.6252, "DA3-H3'": 0.0985, "RA-H4'": 0.1174, "RU5-H3'": 0.0615, 'HIE-HD2': 0.1862,
"DT-O5'": -0.4954,
"RC-HO'2": 0.4186, "RA5-C1'": 0.0394, 'DA3-H3T': 0.4396, "RC-H3'": 0.0615, "RC-O4'": -0.3548,
'HID-HB3': 0.0402,
'HID-HB2': 0.0402, 'DA5-H5T': 0.4422, 'RC-H5': 0.1928, "DC5-C4'": 0.1629, 'DAN-C4': 0.38, 'DAN-C6': 0.6897,
'DAN-C2': 0.5716, 'MET-N': -0.4157, 'MET-O': -0.5679, 'RU-H5': 0.1811, "DG-O5'": -0.4954, 'RU-H6': 0.2188,
'RA5-H5T': 0.4295, "DC-C2'": -0.0854, 'RUN-C6': -0.1126, 'RUN-C5': -0.3635, 'RUN-C4': 0.5952,
'RUN-C2': 0.4687,
'LEU-CD1': -0.4121, 'RA3-H61': 0.4115, 'RA3-H62': 0.4115, 'LEU-CD2': -0.4121, 'DTN-O2': -0.5881,
'DTN-O4': -0.5563,
"DG3-C2'": -0.0854, 'ALA-N': -0.4157, 'ALA-O': -0.5679, 'ALA-H': 0.2719, "DA-H1'": 0.1838, 'ALA-C': 0.5973,
'RU5-H6': 0.2188, 'RU5-H5': 0.1811, 'RU5-H3': 0.3154, "DTN-H1'": 0.1804, 'DA-H62': 0.4167, "RG-C4'": 0.1065,
"RA-O3'": -0.5246, 'THR-HG1': 0.4102, 'DTN-N1': -0.0239, 'DT-O2P': -0.7761, "DT-H1'": 0.1804,
'RC3-P': 1.1662,
"DC3-C5'": -0.0069, "RC-C3'": 0.2022, "DTN-O4'": -0.3691, 'ARG-HD2': 0.0687, 'ARG-HD3': 0.0687,
'RU3-C4': 0.5952,
'RU3-C5': -0.3635, 'RU3-C6': -0.1126, "DT3-H1'": 0.1804, "DT5-C5'": -0.0069, 'DC-C5': -0.5222,
'RCN-H5T': 0.4295,
"DC5-O5'": -0.6318, "DG-C4'": 0.1629, "DG3-O4'": -0.3691, 'DC-C2': 0.7959, "DGN-C4'": 0.1629,
'DG3-C2': 0.7432,
'DG3-C4': 0.1814, 'DG3-C5': 0.1991, "DG5-C2'": -0.0854, 'DG3-C8': 0.0736, 'LYS-CE': -0.0143,
"RUN-C5'": 0.0558,
"RUN-O5'": -0.6223, "DT5-O5'": -0.6318, 'ASN-C': 0.5973, "RU3-C2'": 0.067, 'ASN-H': 0.2719, 'ASN-N': -0.4157,
'ASN-O': -0.5679, "DAN-O3'": -0.6549, "RA5-H5'1": 0.0679, 'DA3-P': 1.1659, "RA5-H5'2": 0.0679,
'DC3-O2': -0.6548,
"DCN-H2'2": 0.0718, "DCN-H2'1": 0.0718, "DA3-H5'1": 0.0754, "RU-C3'": 0.2022, 'DG3-P': 1.1659,
'DA5-H62': 0.4167,
"RG-H2'1": 0.0972, "RGN-O5'": -0.6223, 'PHE-CZ': -0.1072, 'PHE-CA': -0.0024, 'PHE-CB': -0.0343,
'RGN-H22': 0.4364,
'PHE-CG': 0.0118, "RU5-O3'": -0.5246, "RU-H4'": 0.1174, "DGN-O4'": -0.3691, "DA-C3'": 0.0713,
'RC5-N3': -0.7584,
'RC5-N1': -0.0484, "DA-C1'": 0.0431, "RAN-O3'": -0.6541, 'RC5-N4': -0.953, "DA5-O3'": -0.5232,
"DA5-H4'": 0.1176,
'TRP-CA': -0.0275, 'TRP-CB': -0.005, 'TRP-CG': -0.1415, "RA5-C2'": 0.067, 'HIP-HA': 0.1212, 'RC-P': 1.1662,
"RU3-H1'": 0.1824, 'DC-O2': -0.6548, "DC-O5'": -0.4954, "DC5-C5'": -0.0069, 'DT-C5': 0.0025, 'DT-C4': 0.5194,
'DT-C7': -0.2269, 'DT-C6': -0.2209, 'GLU-HB2': -0.0173, 'GLU-HB3': -0.0173, "DG-O4'": -0.3691,
'DT3-C5': 0.0025,
'VAL-CG2': -0.3192, "DC-C5'": -0.0069, "RA3-C2'": 0.067, 'DA-H61': 0.4167, 'DTN-N3': -0.434,
'ILE-CD1': -0.066,
"DG3-C3'": 0.0713, "DA5-O4'": -0.3691, "DC3-O4'": -0.3691, 'RU5-O2': -0.5477, 'RU5-O4': -0.5761,
'DT3-C2': 0.5677,
'RA-O2P': -0.776, 'DT3-C7': -0.2269, 'DT3-C6': -0.2209, "RGN-C5'": 0.0558, 'DT3-C4': 0.5194,
"RA-O2'": -0.6139,
'THR-C': 0.5973, 'THR-H': 0.2719, 'THR-N': -0.4157, 'THR-O': -0.5679, 'DA-C4': 0.38, "DC3-H5'1": 0.0754,
'PRO-CD': 0.0192, 'PRO-CG': 0.0189, 'PRO-CA': -0.0266, "RC-C2'": 0.067, 'PRO-CB': -0.007, "DTN-O5'": -0.6318,
'PHE-HB3': 0.0295, "DC-H5'2": 0.0754, "DC-H5'1": 0.0754, "DT-O3'": -0.5232, "DT5-C4'": 0.1629,
"DA3-C2'": -0.0854,
"DG3-O5'": -0.4954, "DAN-H5'2": 0.0754, "DT3-O3'": -0.6549, "DG5-C3'": 0.0713, 'TRP-CH2': -0.1134,
"RU5-H2'1": 0.0972,
"RG-O3'": -0.5246, "DAN-H2'1": 0.0718, 'ASP-CB': -0.0303, 'ASP-CA': 0.0381, 'ASP-CG': 0.7994,
'LYN-HG2': 0.01041,
'LYN-HG3': 0.01041, 'ARG-HG3': 0.0285, 'ARG-HG2': 0.0285, 'DG-C2': 0.7432, "RUN-O4'": -0.3548,
'DG-C4': 0.1814,
"DT5-O4'": -0.3691, 'DG-C6': 0.4918, "RU3-C3'": 0.2022, 'DG-C8': 0.0736, "RA-H5'2": 0.0679,
"RA-H5'1": 0.0679,
'RGN-H3T': 0.4376, 'CYS-HB3': 0.1112, 'CYS-HB2': 0.1112, "RC-O5'": -0.4989, 'DT-P': 1.1659, "RU-C2'": 0.067,
'GLH-HE2': 0.4641, "RGN-H3'": 0.0615, "RGN-O4'": -0.3548, 'GLN-HG2': 0.0352, 'CYX-H': 0.2719,
'CYX-N': -0.4157,
'CYX-O': -0.5679, 'CYX-C': 0.5973, 'PRO-HG2': 0.0213, 'PRO-HG3': 0.0213, "RAN-O2'": -0.6139, 'LEU-HD21': 0.1,
'LEU-HD23': 0.1, 'LEU-HD22': 0.1, "RAN-H3'": 0.0615, 'RA-H62': 0.4115, "RC3-H4'": 0.1174, 'RC3-H41': 0.4234,
'HIE-HB2': 0.0367, 'HIE-HB3': 0.0367, 'DC3-C4': 0.8439, 'DC3-C5': -0.5222, 'DC3-C6': -0.0183,
'DC5-H6': 0.2293,
'DC5-H5': 0.1863, 'RU5-H5T': 0.4295, "RA5-C3'": 0.2022, 'RAN-H3T': 0.4376, "RC5-O4'": -0.3548,
'HID-HD2': 0.1147,
'HID-HD1': 0.3649, 'MET-HA': 0.088, 'PHE-HB2': 0.0295, "DC3-O5'": -0.4954, 'RGN-H1': 0.3424,
"DTN-C4'": 0.1629,
'RGN-H8': 0.164, "RA3-H4'": 0.1174, "DC-C4'": 0.1629, "RC-H1'": 0.2029, 'RA-C5': 0.0515, 'RA-C6': 0.7009,
'RA-C2': 0.5875, "RA3-C3'": 0.2022, 'RA-C8': 0.2006, 'GLH-CB': -0.0071, 'GLH-CA': 0.0145, 'GLH-CG': -0.0174,
'GLH-CD': 0.6801, "DC3-C3'": 0.0713, 'RU5-N1': 0.0418, 'RU5-N3': -0.3549, "DC-C3'": 0.0713,
"RGN-C4'": 0.1065,
"RG3-H5'1": 0.0679, "RG3-H5'2": 0.0679, "RC5-HO'2": 0.4186, "RG-O2'": -0.6139, 'LEU-HA': 0.0922,
'ARG-NH1': -0.8627,
'LEU-HG': -0.0361, "RCN-H3'": 0.0615, "DT-H3'": 0.0985, "RC-C1'": 0.0066, 'RG-O2P': -0.776,
'RU3-H3T': 0.4376,
"DT3-H3'": 0.0985, 'RCN-H3T': 0.4376, "DT-O4'": -0.3691, 'ASN-HA': 0.1048, 'RU-O1P': -0.776,
"DGN-H1'": 0.1746,
"DG3-H3'": 0.0985, "DG5-C4'": 0.1629, 'LYS-HE3': 0.1135, "RUN-C4'": 0.1065, 'TRP-HB2': 0.0339,
'TRP-HB3': 0.0339,
"DC5-H3'": 0.0985, 'LYN-HD3': 0.01155, 'LYN-HD2': 0.01155, 'GLY-CA': -0.0252, 'RG3-O1P': -0.776,
'GLN-C': 0.5973,
'RA-O1P': -0.776, 'GLN-H': 0.2719, 'GLN-O': -0.5679, 'GLN-N': -0.4157, 'GLN-CA': -0.0031, 'RG5-C5': 0.1744,
'LEU-HB2': 0.0457, 'DG3-H3T': 0.4396, 'DT3-O2P': -0.7761, "RC3-C1'": 0.0066, 'GLN-CG': -0.0645,
'ALA-HB2': 0.0603,
'ALA-HB3': 0.0603, 'ALA-HB1': 0.0603, 'PRO-HD3': 0.0391, 'PRO-HD2': 0.0391, 'GLU-HA': 0.1105, 'RU-P': 1.1662,
"RAN-O5'": -0.6223, "DA3-H4'": 0.1176, 'VAL-CA': -0.0875, "RU5-H4'": 0.1174, "RAN-C5'": 0.0558,
'VAL-CB': 0.2985,
'RA-P': 1.1662, "RC5-O5'": -0.6223, 'RA3-H3T': 0.4376, 'HID-HE1': 0.1392, "DC5-H2'2": 0.0718,
"DC5-H2'1": 0.0718,
"DT-C4'": 0.1629, 'DC3-O2P': -0.7761, "DA-H4'": 0.1176, "RC5-H5'2": 0.0679, "RC5-H5'1": 0.0679,
'HID-CE1': 0.2057,
"DTN-C5'": -0.0069, 'DT-O4': -0.5563, 'DT5-C2': 0.5677, "RA3-H3'": 0.0615, 'DT5-C4': 0.5194,
'DT5-C7': -0.2269,
'DT-O2': -0.5881, 'DTN-H3': 0.342, 'DTN-H6': 0.2607, "DG3-C1'": 0.0358, 'GLU-OE2': -0.8188,
'GLU-OE1': -0.8188,
"RUN-H5'1": 0.0679, "RUN-H5'2": 0.0679, "DT5-H3'": 0.0985, "DCN-H1'": 0.1963, 'RG-O1P': -0.776,
'ALA-HA': 0.0823,
'DG-C5': 0.1991, 'ILE-HB': 0.0187, 'ILE-HA': 0.0869, "RGN-C3'": 0.2022, 'TYR-HB3': 0.0295, 'RCN-H5': 0.1928,
'RCN-H6': 0.1958, 'HID-CG': -0.0266, 'HID-CB': -0.0462, 'HID-CA': 0.0188, 'ASN-HB2': 0.0797,
'ASN-HB3': 0.0797,
'RUN-H6': 0.2188, 'RUN-H5': 0.1811, 'RUN-H3': 0.3154, "DG-H5'2": 0.0754, 'RC3-H5': 0.1928, 'RG3-O2P': -0.776,
"DTN-O3'": -0.6549, 'SER-CA': -0.0249, 'SER-CB': 0.2117, "DT5-C2'": -0.0854, "DG5-O5'": -0.6318,
'DGN-H21': 0.4235,
'DGN-H22': 0.4235, "DG-C1'": 0.0358, "RG-H4'": 0.1174, "DT3-O5'": -0.4954, "DG5-C5'": -0.0069,
'LYN-HE2': -0.03358,
'LYN-HE3': -0.03358, "RG5-C2'": 0.067, 'DG3-H21': 0.4235, 'DG3-H22': 0.4235, 'MET-HE3': 0.0684,
"RG3-O2'": -0.6139,
"DT5-H1'": 0.1804, "RU3-C1'": 0.0674, 'VAL-CG1': -0.3192, 'RA3-C8': 0.2006, 'RA3-C4': 0.3053,
'RA3-C5': 0.0515,
'RA3-C6': 0.7009, 'RA3-C2': 0.5875, "RG5-HO'2": 0.4186, 'DA-C2': 0.5716, 'DC3-P': 1.1659, 'DA-C6': 0.6897,
"DA5-C4'": 0.1629, 'DA-C5': 0.0725, "DC3-H5'2": 0.0754, 'DA-C8': 0.1607, "DT3-C5'": -0.0069,
"RCN-C3'": 0.2022,
"RC3-H2'1": 0.0972, "RG5-H5'2": 0.0679, "RG5-H5'1": 0.0679, 'GLN-HG3': 0.0352, "RGN-H1'": 0.2006,
"RC5-C4'": 0.1065,
'DT-N1': -0.0239, 'RC5-C2': 0.7538, 'RC5-C4': 0.8185, 'RC5-C5': -0.5215, 'RC5-C6': 0.0053,
"RAN-O4'": -0.3548,
'RA5-H8': 0.1553, 'RG5-C8': 0.1374, 'RG5-C4': 0.1222, 'RA5-H2': 0.0473, 'RG5-C6': 0.477, 'GLN-CB': -0.0036,
'GLN-CD': 0.6951, 'RG5-C2': 0.7657, "RUN-H1'": 0.1824, 'GLH-HG2': 0.043, 'GLH-HG3': 0.043, "RG5-H4'": 0.1174,
"RAN-C4'": 0.1065, "RG3-H1'": 0.2006, 'RAN-C2': 0.5875, 'RAN-C6': 0.7009, 'RAN-C5': 0.0515, 'RAN-C4': 0.3053,
'HIE-HA': 0.136, 'RC3-H6': 0.1958, "DG-H5'1": 0.0754, 'RAN-C8': 0.2006, "RGN-H5'2": 0.0679,
'LYS-HB3': 0.0362,
"RGN-H5'1": 0.0679, "DT-C5'": -0.0069, 'LEU-HB3': 0.0457, 'ASN-OD1': -0.5931, "RC5-H3'": 0.0615,
"DC3-O3'": -0.6549,
'LYN-HA': 0.0994, 'HID-CD2': 0.1292, 'ILE-CG2': -0.3204, 'DT-N3': -0.434, 'ILE-CG1': -0.043,
"DC-H1'": 0.1963,
"DCN-H5'1": 0.0754, "DCN-H5'2": 0.0754, "RA3-C1'": 0.0394, "RG-HO'2": 0.4186, 'ASN-HD21': 0.4196,
'CYS-CB': -0.1231,
'CYS-CA': 0.0213, 'ASN-HD22': 0.4196, 'DT3-N1': -0.0239, 'DT3-N3': -0.434, "RGN-C2'": 0.067,
"DCN-O3'": -0.6549,
'PHE-CE1': -0.1704, 'PHE-CE2': -0.1704, 'DGN-H3T': 0.4396, "RCN-H1'": 0.2029, 'DT5-N1': -0.0239,
"DGN-H3'": 0.0985,
"RU-O3'": -0.5246, 'RG3-H8': 0.164, 'RG3-H1': 0.3424, "RG3-O3'": -0.6541, 'TYR-H': 0.2719, 'TYR-O': -0.5679,
'TYR-N': -0.4157, "RA5-H2'1": 0.0972, 'TYR-C': 0.5973, 'VAL-O': -0.5679, "DG3-H1'": 0.1746,
"DT3-O4'": -0.3691,
'LYN-HB3': 0.034, 'LYN-HB2': 0.034, "RG5-C3'": 0.2022, "DT5-C1'": 0.068, 'THR-CG2': -0.2438, 'DG-N9': 0.0577,
'DG-N3': -0.6636, 'DG-N2': -0.923, 'DG-N1': -0.5053, 'DG-N7': -0.5725, 'DAN-H62': 0.4167, 'DAN-H61': 0.4167,
"DA-O3'": -0.5232, 'TRP-N': -0.4157, 'TRP-O': -0.5679, 'TRP-H': 0.2719, "DA5-C5'": -0.0069, 'MET-CB': 0.0342,
'TRP-C': 0.5973, "DT3-C4'": 0.1629, "RU-O5'": -0.4989, 'DA-N7': -0.6175, 'DA-N6': -0.9123, 'DA-N1': -0.7624,
'DA-N3': -0.7417, "RCN-C2'": 0.067, 'DA-N9': -0.0268, 'CYM-HA': 0.0508, 'CYM-HN': 0.2719, 'RAN-H8': 0.1553,
'PRO-HB3': 0.0253, 'PRO-HB2': 0.0253, 'RA-N6': -0.9019, 'DC3-C2': 0.7959, "RC5-C5'": 0.0558,
'DA5-H8': 0.1877,
'RA-N3': -0.6997, 'DA5-H2': 0.0598, 'RA-H61': 0.4115, "DT-H2'1": 0.0718, "DT-H2'2": 0.0718,
"RUN-O3'": -0.6541,
"RC3-C2'": 0.067, "RU5-C1'": 0.0674, "RG5-H3'": 0.0615, 'TRP-CD1': -0.1638, 'RU3-O2P': -0.776,
'DC-N4': -0.9773,
'DC-N3': -0.7748, 'DC-N1': -0.0339, 'HIE-H': 0.2719, 'HIE-N': -0.4157, 'HIE-O': -0.5679, 'RC3-O2': -0.6252,
'GLY-HA3': 0.0698, 'GLY-HA2': 0.0698, 'RC3-H42': 0.4234, "DC5-C1'": -0.0116, 'DC-O1P': -0.7761,
"DA3-C1'": 0.0431,
'RC-C2': 0.7538, 'RC-C6': 0.0053, 'RC-C4': 0.8185, 'RC-C5': -0.5215, "DGN-H2'2": 0.0718, "DGN-H2'1": 0.0718,
"RA3-H1'": 0.2007, "RU3-O2'": -0.6139, "RC-H4'": 0.1174, 'RC-H41': 0.4234, 'RC-H42': 0.4234,
"RGN-C1'": 0.0191,
'RA-H2': 0.0473, 'RU5-C6': -0.1126, 'RU5-C4': 0.5952, 'RU5-C5': -0.3635, 'RU5-C2': 0.4687, 'RA-H8': 0.1553,
"DCN-H3'": 0.0985, 'DT3-O4': -0.5563, 'DT3-O2': -0.5881, "RC5-H2'1": 0.0972, 'CYS-H': 0.2719,
'CYS-O': -0.5679,
'CYS-N': -0.4157, 'RCN-N3': -0.7584, 'CYS-C': 0.5973, 'RCN-N1': -0.0484, 'RCN-N4': -0.953, 'CYX-HA': 0.0766,
'LYS-HA': 0.1426, 'GLH-HA': 0.0779, 'PHE-CD2': -0.1256, 'DCN-H3T': 0.4396, 'PHE-CD1': -0.1256,
"DG5-H2'2": 0.0718,
'DT5-O4': -0.5563, "DG5-H2'1": 0.0718, 'DT5-O2': -0.5881, "RG5-H2'1": 0.0972, "DGN-H4'": 0.1176,
"RGN-HO'2": 0.4186,
"DA-H2'2": 0.0718, "DA-H2'1": 0.0718, 'HIP-HD2': 0.2317, 'HIP-HD1': 0.3866, "DAN-C3'": 0.0713,
'PHE-H': 0.2719,
'PHE-O': -0.5679, 'PHE-N': -0.4157, "RA3-O4'": -0.3548, 'PHE-C': 0.5973, 'DT5-N3': -0.434, 'DA3-C6': 0.6897,
"RG3-O4'": -0.3548, "DT3-H2'2": 0.0718, "DT3-H2'1": 0.0718, 'DA3-C5': 0.0725, 'RG-H22': 0.4364,
'RG-H21': 0.4364,
"DC-O4'": -0.3691, 'LYS-HZ2': 0.34, 'LYS-HZ3': 0.34, "RA-C1'": 0.0394, 'RU3-H5': 0.1811, 'RU3-H6': 0.2188,
'RU3-H3': 0.3154, 'DT5-H71': 0.077, 'DT5-H72': 0.077, 'DT5-H73': 0.077, 'MET-HG2': 0.044, "RG3-C4'": 0.1065,
"DA5-C2'": -0.0854, "DT3-H5'2": 0.0754, "RCN-C1'": 0.0066, 'DA-O2P': -0.7761, 'DG-O6': -0.5699,
"RC5-C2'": 0.067,
'DGN-N9': 0.0577, 'SER-OG': -0.6546, 'DGN-N7': -0.5725, 'DGN-N1': -0.5053, 'DGN-N2': -0.923,
'DGN-N3': -0.6636,
'DA3-H2': 0.0598, 'DA3-H8': 0.1877, "RU-H1'": 0.1824, "DGN-O3'": -0.6549, 'CYS-SG': -0.3119, 'RG-H1': 0.3424,
'TRP-HA': 0.1123, 'RG3-H3T': 0.4376, 'RG-H8': 0.164, "RC3-C3'": 0.2022, 'TRP-HZ2': 0.1572, "RUN-H3'": 0.0615,
"RU5-C2'": 0.067, "DC5-C2'": -0.0854, 'RG5-O6': -0.5597, 'RUN-H3T': 0.4376, 'MET-HB2': 0.0241,
'MET-HB3': 0.0241,
'RG5-H21': 0.4364, 'RG5-H22': 0.4364, "RG3-H3'": 0.0615, 'RC3-N1': -0.0484, 'RC3-N3': -0.7584,
'RC3-N4': -0.953,
"RC5-H1'": 0.2029, 'LYN-NZ': -1.03581, "DAN-H5'1": 0.0754, 'THR-HA': 0.1007, 'THR-HB': 0.0043,
"RU3-O3'": -0.6541,
"RU3-H4'": 0.1174, "DG-H4'": 0.1176, "RCN-H2'1": 0.0972, 'RA-C4': 0.3053, 'TYR-CE1': -0.2341,
'TYR-CE2': -0.2341,
"DC-H3'": 0.0985, 'DT3-H3': 0.342, 'DT3-H6': 0.2607, "DCN-C5'": -0.0069, 'DC3-O1P': -0.7761,
'RCN-O2': -0.6252,
"DCN-O5'": -0.6318, 'THR-OG1': -0.6761, 'DGN-H5T': 0.4422, 'RGN-C5': 0.1744, "RA-C3'": 0.2022,
'RGN-C6': 0.477,
"DC5-H5'1": 0.0754, "DC5-H5'2": 0.0754, 'HIP-HE1': 0.2681, 'HIP-HE2': 0.3911, "DG-C2'": -0.0854,
'HID-HA': 0.0881,
"RA3-O5'": -0.4989, 'TRP-CE2': 0.138, 'TRP-CE3': -0.2387, "RG3-O5'": -0.4989, "RG-H3'": 0.0615,
'DA-O1P': -0.7761,
"DG5-H1'": 0.1746, "RA5-H4'": 0.1174, "DG3-H5'2": 0.0754, "DG3-H5'1": 0.0754, 'RG3-P': 1.1662,
'ILE-CB': 0.1303,
'ILE-CA': -0.0597, "DT5-H4'": 0.1176, "DAN-H4'": 0.1176, "RG5-C1'": 0.0191, 'RA3-N3': -0.6997,
"RG3-C5'": 0.0558,
"DA5-C3'": 0.0713, 'RA5-H62': 0.4115, 'GLH-HB3': 0.0256, 'RA5-H61': 0.4115, "DGN-H5'1": 0.0754,
"DGN-H5'2": 0.0754,
'PRO-C': 0.5896, 'PRO-O': -0.5748, 'PRO-N': -0.2548, 'RA5-C8': 0.2006, "DC5-C3'": 0.0713, 'TYR-CA': -0.0014,
'TYR-CB': -0.0152, 'TYR-CG': -0.0011, 'DG-H1': 0.352, 'RA5-C2': 0.5875, "RGN-H4'": 0.1174, "RC5-C3'": 0.2022,
'DG-H8': 0.1997, 'TYR-CZ': 0.3226, 'DGN-O6': -0.5699, "RU5-C3'": 0.2022, 'RA5-C6': 0.7009, 'RG5-N1': -0.4787,
"RC3-O4'": -0.3548, 'RA5-C5': 0.0515, 'LEU-H': 0.2719, 'ASP-C': 0.5366, 'LEU-N': -0.4157, 'LEU-O': -0.5679,
'THR-HG22': 0.0642, 'ASP-H': 0.2936, 'LEU-C': 0.5973, 'ASP-N': -0.5163, 'ASP-O': -0.5819, "RC3-C4'": 0.1065,
'RA3-N9': -0.0251, 'RA3-N7': -0.6073, 'RA3-N6': -0.9019, "RA-HO'2": 0.4186, "RUN-H4'": 0.1174,
'RA3-N1': -0.7615,
'GLH-HB2': 0.0256, 'ASH-H': 0.2719, 'ASH-N': -0.4157, 'ASH-O': -0.5679, 'RG5-N9': 0.0492, 'RG3-H22': 0.4364,
'ASH-C': 0.5973, "RG5-H1'": 0.2006, 'RG3-H21': 0.4364, "RAN-C1'": 0.0394, 'RG5-N2': -0.9672,
'RA5-C4': 0.3053,
"DT5-H5'1": 0.0754, "RA-H2'1": 0.0972, 'RAN-N9': -0.0251, 'RAN-N1': -0.7615, 'RAN-N3': -0.6997,
"DA5-O5'": -0.6318,
'RAN-N6': -0.9019, 'RAN-N7': -0.6073, "DC-O3'": -0.5232, "RAN-HO'2": 0.4186, "DA3-C3'": 0.0713,
'DC5-C2': 0.7959,
'DC3-N4': -0.9773, 'DC5-C6': -0.0183, 'DC5-C4': 0.8439, 'DC5-C5': -0.5222, "RA3-C4'": 0.1065,
"RC-O3'": -0.5246,
'CYS-HA': 0.1124, 'CYS-HG': 0.1933, 'HIP-CE1': -0.017, 'SER-H': 0.2719, 'SER-N': -0.4157, 'SER-O': -0.5679,
'SER-C': 0.5973, 'TYR-CD2': -0.1906, 'TYR-CD1': -0.1906, "DC-H4'": 0.1176, 'TRP-NE1': -0.3418,
"DCN-C4'": 0.1629,
'DC-H42': 0.4314, 'DC-H41': 0.4314, 'ASP-OD2': -0.8014, 'ASP-OD1': -0.8014, "RU3-H2'1": 0.0972,
'ASH-HD2': 0.4747,
"DCN-O4'": -0.3691, 'GLN-HE21': 0.4251, "RG5-O4'": -0.3548, 'GLN-HE22': 0.4251, "DG-H2'1": 0.0718,
"DG-H2'2": 0.0718,
'LYS-NZ': -0.3854, 'ILE-C': 0.5973, 'PHE-HD1': 0.133, "RU5-HO'2": 0.4186, 'PHE-HD2': 0.133, "RA-C2'": 0.067,
'ILE-H': 0.2719, "RGN-H2'1": 0.0972, 'ILE-O': -0.5679, 'DC5-H5T': 0.4422, "RG-C3'": 0.2022,
"RA3-O2'": -0.6139,
'TRP-HE1': 0.3412, 'TRP-HE3': 0.17, 'RA3-O2P': -0.776, 'RG3-C8': 0.1374, 'RG3-C2': 0.7657, 'RG3-C6': 0.477,
'RG3-C4': 0.1222, 'RG3-C5': 0.1744, "DC3-C2'": -0.0854, 'DT3-H3T': 0.4396, 'ARG-NH2': -0.8627,
'TYR-HA': 0.0876,
'RGN-H5T': 0.4295, 'DAN-H5T': 0.4422, "RUN-O2'": -0.6139, "RA5-H1'": 0.2007, 'TYR-HH': 0.3992,
'HIP-ND1': -0.1513,
'DG3-H8': 0.1997, 'DG3-H1': 0.352, "RA5-O2'": -0.6139, 'ARG-C': 0.7341, 'ARG-N': -0.3479, 'ARG-O': -0.5894,
'ARG-H': 0.2747, 'DT5-H5T': 0.4422, "DA3-H2'2": 0.0718, "DA3-H2'1": 0.0718, 'RC-H6': 0.1958,
'DA3-N1': -0.7624,
'DA3-N3': -0.7417, "RU5-C4'": 0.1065, 'DA3-N7': -0.6175, 'DA3-N6': -0.9123, 'DA3-N9': -0.0268,
'DC-O2P': -0.7761,
'TRP-CZ3': -0.1972, 'TRP-CZ2': -0.2601, 'RA3-P': 1.1662, "RC3-O5'": -0.4989, "RG-H5'1": 0.0679,
"RG-H5'2": 0.0679,
'RG-N9': 0.0492, 'DAN-C5': 0.0725, 'RU-O2P': -0.776, 'RG-N1': -0.4787, 'RG-N3': -0.6323, 'RG-N2': -0.9672,
'RG-N7': -0.5709, "DTN-H5'2": 0.0754, "DTN-H5'1": 0.0754, "RC3-C5'": 0.0558, 'DC3-N3': -0.7748,
'HIP-H': 0.2747,
'DC3-N1': -0.0339, 'HIP-O': -0.5894, 'HIP-N': -0.3479, 'HIP-C': 0.7341, 'MET-C': 0.5973, 'RUN-H5T': 0.4295,
"RU-O2'": -0.6139, 'RU-H3': 0.3154, "DA3-O4'": -0.3691, "RU5-O4'": -0.3548, 'MET-H': 0.2719,
'DAN-C8': 0.1607,
"DT-C1'": 0.068, "DA5-H3'": 0.0985, "DA3-C4'": 0.1629, 'HID-O': -0.5679, 'HID-N': -0.4157, 'HID-H': 0.2719,
'HID-C': 0.5973, "DT5-C3'": 0.0713, "DAN-C2'": -0.0854, 'RC-O2P': -0.776, "DG5-O4'": -0.3691,
"RC-O2'": -0.6139,
'HIP-CD2': -0.1141, "DAN-O5'": -0.6318, 'RU3-O1P': -0.776, 'PHE-HA': 0.0978, 'DC-C4': 0.8439,
'RC-N4': -0.953,
'DC-C6': -0.0183, 'RC-N1': -0.0484, 'PHE-HZ': 0.1297, "DT-H5'2": 0.0754, 'LYS-CG': 0.0187, 'LYS-CD': -0.0479,
"DT-H5'1": 0.0754, 'LYS-CB': -0.0094, 'CYX-SG': -0.1081, 'LYS-CA': -0.24, 'DTN-H73': 0.077, 'DTN-H72': 0.077,
'DTN-H71': 0.077, 'DC-P': 1.1659, "RG5-O5'": -0.6223, "RC-H2'1": 0.0972, 'DG-P': 1.1659, 'GLH-OE2': -0.6511,
'GLH-OE1': -0.5838, 'PHE-HE2': 0.143, 'PHE-HE1': 0.143, "DA3-H5'2": 0.0754, "RA-C5'": 0.0558,
'ARG-HH22': 0.4478,
'ARG-HH21': 0.4478, "RG-C2'": 0.067, 'ALA-CB': -0.1825, 'ALA-CA': 0.0337, "RA3-O3'": -0.6541,
'TRP-HD1': 0.2062,
"RA3-H2'1": 0.0972, "RG-H1'": 0.2006, "DC5-H4'": 0.1176, "DG5-H3'": 0.0985, "RUN-H2'1": 0.0972,
"RA-O5'": -0.4989,
'DC5-H41': 0.4314, 'DC5-H42': 0.4314, 'SER-HA': 0.0843, "DT5-H5'2": 0.0754, 'SER-HG': 0.4275,
'DA3-H62': 0.4167,
'DA3-H61': 0.4167, "RUN-HO'2": 0.4186, "RU5-H5'1": 0.0679, "RU-H2'1": 0.0972, "RU5-H5'2": 0.0679,
'DG3-O2P': -0.7761,
"DA5-C1'": 0.0431, 'HIP-NE2': -0.1718, 'DT3-P': 1.1659, 'DG5-H1': 0.352, 'DG5-H8': 0.1997,
"RA5-O3'": -0.5246,
'LYN-C': 0.5973, 'LYN-O': -0.5679, 'LYN-N': -0.4157, 'LYN-H': 0.2719, "DA-H5'1": 0.0754, "DA-H5'2": 0.0754,
"RC5-C1'": 0.0066, 'RG5-N7': -0.5709, "RAN-H5'1": 0.0679, "RAN-H5'2": 0.0679, "RU5-C5'": 0.0558,
"RGN-O3'": -0.6541,
'CYM-CA': -0.0351, 'DGN-C8': 0.0736, 'DGN-C6': 0.4918, 'DGN-C5': 0.1991, 'DGN-C4': 0.1814, 'DGN-C2': 0.7432,
"RU-C5'": 0.0558, 'RG-O6': -0.5597, "DT5-O3'": -0.5232, 'RA3-H8': 0.1553, 'RA3-H2': 0.0473,
"RA5-H3'": 0.0615,
'RG5-H8': 0.164, "DT-C2'": -0.0854, 'RG5-H1': 0.3424, 'TYR-OH': -0.5579, "RAN-H4'": 0.1174,
'MET-CA': -0.0237,
"RAN-C3'": 0.2022, 'MET-CE': -0.0536, 'MET-CG': 0.0018, 'RC3-C6': 0.0053, 'RC3-C4': 0.8185,
'RC3-C5': -0.5215,
'RC3-C2': 0.7538, 'DAN-H8': 0.1877, "DA3-O5'": -0.4954, 'HID-ND1': -0.3811, 'DAN-H2': 0.0598,
"RU5-O5'": -0.6223,
'DC3-H42': 0.4314, "DA5-H5'2": 0.0754, "DA5-H5'1": 0.0754, 'DC3-H41': 0.4314, "DA-C4'": 0.1629,
"DC3-H4'": 0.1176,
'RG5-N3': -0.6323, 'DTN-C7': -0.2269, "DA3-C5'": -0.0069, 'MET-SD': -0.2737, 'RC5-H5': 0.1928,
'DA3-C4': 0.38,
'RC5-H6': 0.1958, 'DA3-C2': 0.5716, 'DA3-C8': 0.1607, 'MET-HG3': 0.044, 'HIE-NE2': -0.2795, 'DC3-H5': 0.1863,
"DT3-H5'1": 0.0754, "DAN-C1'": 0.0431, 'DC3-H6': 0.2293, 'GLN-OE1': -0.6086, 'ARG-HE': 0.3456,
'DC5-O2': -0.6548,
'ARG-HA': 0.156, 'LYN-CE': 0.32604, 'LYN-CD': -0.03768, 'LYN-CG': 0.06612, 'RGN-C2': 0.7657,
'LYN-CA': -0.07206,
'RGN-C4': 0.1222, 'LYN-CB': -0.04845, "RA5-C4'": 0.1065, 'RGN-C8': 0.1374, 'RC-O2': -0.6252,
"RCN-O5'": -0.6223,
'RUN-N1': 0.0418, 'RUN-N3': -0.3549, "RG5-O2'": -0.6139, 'RC-O1P': -0.776, 'HIE-C': 0.5973, "RA-C4'": 0.1065,
'ASN-CG': 0.713, 'ARG-HH12': 0.4478, 'ASN-CA': 0.0143, 'ASN-CB': -0.2041, 'ARG-HH11': 0.4478,
'ARG-CD': 0.0486,
"RG-C1'": 0.0191, 'TYR-HD1': 0.1699, "DG5-O3'": -0.5232, 'TYR-HD2': 0.1699, 'ILE-N': -0.4157,
"RC3-HO'2": 0.4186,
"DG3-O3'": -0.6549, "DG3-H4'": 0.1176, "RA-O4'": -0.3548, 'ASH-OD2': -0.6376, 'ASH-OD1': -0.5554,
'DG5-H22': 0.4235,
'DG5-H21': 0.4235, "DT3-H4'": 0.1176, "DT3-C3'": 0.0713, 'DAN-H3T': 0.4396, "DGN-C1'": 0.0358,
"RG5-C4'": 0.1065,
"RG-O4'": -0.3548, 'DG5-O6': -0.5699, "RA5-O4'": -0.3548, "RCN-C5'": 0.0558, "DAN-H3'": 0.0985}
nt_charges = {'HID-H2': 0.1963, 'HID-H3': 0.1963, 'HID-H1': 0.1963, 'ASN-HB2': 0.0515, 'ALA-HB2': 0.03, 'ALA-HB3': 0.03,
'ALA-HB1': 0.03, 'THR-OG1': -0.6764, 'GLU-H1': 0.2391, 'GLU-H3': 0.2391, 'GLU-H2': 0.2391, 'PRO-HD3': 0.1,
'PRO-HD2': 0.1, 'GLU-HA': 0.1202, 'THR-CA': 0.0034, 'THR-CB': 0.4514, 'LYS-HB2': 0.0283,
'ILE-HG21': 0.0947,
'ILE-HG22': 0.0947, 'ILE-HG23': 0.0947, 'HID-NE2': -0.5711, 'HIP-HE1': 0.2645, 'HIP-HE2': 0.3921,
'GLU-CD': 0.8087,
'VAL-CA': -0.0054, 'GLU-CG': -0.0236, 'GLU-CA': 0.0588, 'GLU-CB': 0.0909, 'HID-HA': 0.0958,
'VAL-CB': 0.3196,
'TRP-CE2': 0.1575, 'TRP-CE3': -0.2265, 'ASP-N': 0.0782, 'ASP-O': -0.5889, 'VAL-C': 0.6163,
'HID-HE1': 0.1385,
'VAL-O': -0.5722, 'VAL-N': 0.0577, 'CYX-CB': -0.0277, 'HIE-ND1': -0.5579, 'CYX-CA': 0.1055,
'SER-HB2': 0.0273,
'SER-HB3': 0.0273, 'ILE-CB': 0.1885, 'HID-CE1': 0.2127, 'ILE-CA': 0.0257, 'GLU-C': 0.5621,
'GLU-N': 0.0017,
'GLU-O': -0.5889, 'GLU-HG3': -0.0315, 'GLU-HG2': -0.0315, 'PRO-C': 0.526, 'GLU-OE1': -0.8189,
'PRO-O': -0.5,
'PRO-N': -0.202, 'ALA-HA': 0.0889, 'TYR-CA': 0.057, 'TYR-CB': 0.0659, 'TYR-CG': -0.0205, 'ILE-HB': 0.0213,
'ILE-HA': 0.1031, 'TYR-CZ': 0.3139, 'CYX-HB2': 0.068, 'CYX-HB3': 0.068, 'TYR-HB3': 0.0102,
'TYR-HB2': 0.0102,
'HID-CG': -0.0399, 'HID-CB': 0.0259, 'HID-CA': 0.0964, 'ILE-H3': 0.2329, 'ILE-H2': 0.2329,
'ILE-H1': 0.2329,
'ASN-HB3': 0.0515, 'THR-CG2': -0.2554, 'ASP-C': 0.5621, 'LEU-N': 0.101, 'LEU-O': -0.5713, 'LEU-C': 0.6123,
'ALA-H3': 0.1997, 'ALA-H2': 0.1997, 'ALA-H1': 0.1997, 'TYR-HE2': 0.165, 'TYR-HE1': 0.165,
'VAL-HG12': 0.0735,
'VAL-HG13': 0.0735, 'VAL-HG11': 0.0735, 'SER-CA': 0.0567, 'SER-CB': 0.2596, 'TYR-H1': 0.1873,
'TYR-H2': 0.1873,
'TYR-H3': 0.1873, 'PRO-H2': 0.312, 'PRO-H3': 0.312, 'HIE-HD2': 0.1963, 'VAL-H1': 0.2272, 'VAL-H2': 0.2272,
'VAL-H3': 0.2272, 'VAL-HA': 0.1093, 'VAL-HB': -0.0221, 'TYR-HA': 0.0983, 'PRO-HA': 0.1, 'TYR-HH': 0.4001,
'HID-HB3': 0.0209, 'HID-HB2': 0.0209, 'MET-C': 0.6123, 'MET-N': 0.1592, 'MET-O': -0.5713,
'CYS-HA': 0.1411,
'VAL-CG2': -0.3129, 'HIP-CE1': -0.0011, 'SER-N': 0.1849, 'SER-O': -0.5722, 'SER-C': 0.6163,
'LEU-CD1': -0.4106,
'LEU-CD2': -0.4104, 'TRP-NE1': -0.3444, 'LYS-HB3': 0.0283, 'ALA-N': 0.1414, 'ALA-O': -0.5722,
'ALA-C': 0.6163,
'ASP-OD2': -0.8084, 'ASP-OD1': -0.8084, 'GLN-HG2': 0.0331, 'GLN-HG3': 0.0331, 'GLN-HE21': 0.4429,
'GLN-HE22': 0.4429,
'ASN-H2': 0.1921, 'ARG-NE': -0.565, 'CYS-H2': 0.2023, 'CYS-H3': 0.2023, 'CYS-H1': 0.2023, 'ILE-C': 0.6123,
'PHE-HD1': 0.1374, 'ILE-HG13': 0.0201, 'THR-HG1': 0.407, 'PHE-HD2': 0.1374, 'ILE-HG12': 0.0201,
'ILE-N': 0.0311,
'ILE-O': -0.5713, 'HIE-H1': 0.2016, 'LYS-HE2': 0.1171, 'HIE-H3': 0.2016, 'HIE-H2': 0.2016,
'GLN-CA': 0.0536,
'GLN-CB': 0.0651, 'GLN-CD': 0.7354, 'GLN-CG': -0.0903, 'TRP-HE1': 0.3412, 'TRP-HE3': 0.1646,
'ARG-HD2': 0.0527,
'ARG-HD3': 0.0527, 'HIE-HA': 0.138, 'ARG-NH2': -0.8693, 'LEU-HB2': 0.0256, 'LEU-HB3': 0.0256,
'ASN-OD1': -0.5744,
'MET-HE1': 0.0597, 'MET-HE3': 0.0597, 'MET-HE2': 0.0597, 'HID-CD2': 0.1046, 'ILE-CG2': -0.372,
'ILE-CG1': -0.0387,
'HIP-ND1': -0.151, 'ASN-C': 0.6163, 'ASN-N': 0.1801, 'ASN-O': -0.5722, 'ARG-C': 0.7214, 'ARG-N': 0.1305,
'ARG-O': -0.6013, 'ASN-HD21': 0.4097, 'CYS-CB': -0.1195, 'CYS-CA': 0.0927, 'ASN-HD22': 0.4097,
'ACE-O': -0.5679,
'LEU-CG': 0.3421, 'LEU-CA': 0.0104, 'LEU-CB': -0.0244, 'TRP-CZ3': -0.2034, 'TRP-CZ2': -0.271,
'HIP-HB3': 0.0531,
'HIP-HB2': 0.0531, 'PHE-CE1': -0.1602, 'PHE-CE2': -0.1603, 'PHE-CZ': -0.1208, 'PHE-CA': 0.0733,
'PHE-CB': 0.033,
'PHE-CG': 0.0031, 'HIP-O': -0.6013, 'HIP-N': 0.256, 'HIP-C': 0.7214, 'MET-HB2': 0.0125,
'ASN-ND2': -0.8634,
'TRP-CA': 0.0421, 'TRP-CB': 0.0543, 'TRP-CG': -0.1654, 'TYR-O': -0.5713, 'TYR-N': 0.194, 'TYR-C': 0.6123,
'HIP-HA': 0.1047, 'HID-O': -0.5713, 'HID-N': 0.1542, 'PHE-H1': 0.1921, 'PHE-H2': 0.1921, 'PHE-H3': 0.1921,
'HID-C': 0.6123, 'GLU-HB2': -0.0232, 'GLU-HB3': -0.0232, 'THR-HG22': 0.0627, 'THR-HG23': 0.0627,
'THR-HG21': 0.0627,
'LYS-C': 0.7214, 'LYS-N': 0.0966, 'LYS-O': -0.6013, 'HIP-CD2': -0.1433, 'VAL-CG1': -0.3129,
'CYS-HSG': 0.1975,
'TRP-N': 0.1913, 'TRP-O': -0.5713, 'PHE-HA': 0.1041, 'ILE-CD1': -0.0908, 'TRP-C': 0.6123,
'HIP-H2': 0.1704,
'HIP-H3': 0.1704, 'HIP-H1': 0.1704, 'PHE-HZ': 0.1329, 'LYS-CG': -0.0048, 'LYS-CD': -0.0608,
'LYS-CE': -0.0181,
'LYS-CB': 0.0212, 'CYX-SG': -0.0984, 'LYS-CA': -0.0015, 'ASP-HB2': -0.0169, 'ASP-HB3': -0.0169,
'PRO-HB3': 0.1,
'PRO-HB2': 0.1, 'TYR-CD2': -0.2002, 'TYR-CD1': -0.2002, 'PHE-HE2': 0.1433, 'PHE-HE1': 0.1433,
'ARG-HH22': 0.4494,
'ARG-HH21': 0.4494, 'THR-C': 0.6163, 'LYS-HD2': 0.0633, 'LYS-HD3': 0.0633, 'THR-N': 0.1812,
'THR-O': -0.5722,
'ALA-CB': -0.0597, 'ALA-CA': 0.0962, 'PRO-CD': -0.012, 'PRO-CG': -0.121, 'TRP-HD1': 0.2195, 'PRO-CA': 0.1,
'PRO-CB': -0.115, 'PHE-HB3': 0.0104, 'PHE-HB2': 0.0104, 'ILE-HD12': 0.0226, 'ILE-HD13': 0.0226,
'HIE-N': 0.1472,
'ILE-HD11': 0.0226, 'GLY-HA3': 0.0895, 'GLY-HA2': 0.0895, 'SER-HA': 0.0782, 'SER-HG': 0.4239,
'SER-H1': 0.1898,
'SER-H3': 0.1898, 'SER-H2': 0.1898, 'PHE-C': 0.6123, 'TRP-CH2': -0.108, 'HIE-CD2': -0.2349,
'ASP-CB': -0.0235,
'HIP-CB': 0.0484, 'HIP-CA': 0.0581, 'ASP-CA': 0.0292, 'HIP-CG': -0.0236, 'ASP-CG': 0.8194,
'ARG-HG3': 0.0309,
'ARG-HG2': 0.0309, 'HIP-NE2': -0.1739, 'ACE-CH3': -0.3662, 'CYX-H3': 0.1815, 'CYX-H2': 0.1815,
'CYX-H1': 0.1815,
'LYS-H3': 0.2165, 'LYS-H2': 0.2165, 'LYS-H1': 0.2165, 'CYS-HB3': 0.1188, 'CYS-HB2': 0.1188,
'ARG-CZ': 0.8281,
'ARG-CG': 0.0236, 'ARG-CD': 0.0935, 'ARG-CB': 0.0118, 'ARG-CA': -0.0223, 'CYS-O': -0.5713,
'CYS-N': 0.1325,
'CYS-C': 0.6123, 'TRP-HZ2': 0.1589, 'TRP-HZ3': 0.1458, 'ACE-C': 0.5972, 'CYX-HA': 0.0922, 'LYS-HA': 0.118,
'PHE-CD2': -0.1391, 'PHE-CD1': -0.1392, 'CYX-N': 0.2069, 'CYX-O': -0.5713, 'CYX-C': 0.6123,
'PRO-HG2': 0.1,
'PRO-HG3': 0.1, 'TYR-OH': -0.5578, 'MET-CA': 0.0221, 'MET-CB': 0.0865, 'MET-CE': -0.0341,
'MET-CG': 0.0334,
'GLU-OE2': -0.8189, 'MET-H1': 0.1984, 'MET-H3': 0.1984, 'MET-H2': 0.1984, 'LEU-HD21': 0.098,
'LEU-HD23': 0.098,
'LEU-HD22': 0.098, 'HID-ND1': -0.3819, 'HIP-HD2': 0.2495, 'HIP-HD1': 0.3821, 'HIE-HB2': 0.0223,
'HIE-HB3': 0.0223,
'PHE-O': -0.5713, 'PHE-N': 0.1737, 'ASP-H3': 0.22, 'ASP-H2': 0.22, 'ASP-H1': 0.22, 'ASP-HA': 0.1141,
'ARG-HB2': 0.0226,
'ARG-HB3': 0.0226, 'TRP-CD1': -0.1788, 'MET-SD': -0.2774, 'TRP-CD2': 0.1132, 'ACE-HH33': 0.1123,
'ACE-HH32': 0.1123,
'ACE-HH31': 0.1123, 'HID-HD2': 0.1299, 'HID-HD1': 0.3632, 'MET-HA': 0.1116, 'MET-HG3': 0.0292,
'MET-HG2': 0.0292,
'TRP-HH2': 0.1411, 'LYS-HZ2': 0.3382, 'LYS-HZ3': 0.3382, 'LYS-HZ1': 0.3382, 'GLN-OE1': -0.6133,
'ARG-HE': 0.3592,
'ARG-HA': 0.1242, 'HIE-NE2': -0.2781, 'VAL-HG23': 0.0735, 'VAL-HG22': 0.0735, 'VAL-HG21': 0.0735,
'LYS-NZ': -0.3764,
'ARG-H3': 0.2083, 'ARG-H2': 0.2083, 'ARG-H1': 0.2083, 'HIE-O': -0.5713, 'SER-OG': -0.6714,
'GLN-NE2': -1.0031,
'HIE-C': 0.6123, 'LYS-HG3': 0.0121, 'LYS-HG2': 0.0121, 'HIE-HE1': 0.1397, 'HIE-HE2': 0.3324,
'GLN-HA': 0.1015,
'ASN-H1': 0.1921, 'ASN-H3': 0.1921, 'ASN-CG': 0.5833, 'ARG-HH12': 0.4494, 'ASN-CA': 0.0368,
'ASN-CB': -0.0283,
'ARG-HH11': 0.4494, 'CYS-SG': -0.3298, 'TRP-HA': 0.1162, 'LEU-HA': 0.1053, 'ARG-NH1': -0.8693,
'GLN-HB3': 0.005,
'GLN-HB2': 0.005, 'LEU-HG': -0.038, 'TYR-HD1': 0.172, 'TYR-HD2': 0.172, 'GLN-C': 0.6123, 'HIE-CG': 0.174,
'HIE-CA': 0.0236, 'HIE-CB': 0.0489, 'MET-HB3': 0.0125, 'TRP-H1': 0.1888, 'TRP-H3': 0.1888,
'TRP-H2': 0.1888,
'LEU-H1': 0.2148, 'LEU-H3': 0.2148, 'LEU-H2': 0.2148, 'ASN-HA': 0.1231, 'LEU-HD11': 0.098,
'LEU-HD12': 0.098,
'LEU-HD13': 0.098, 'GLN-H1': 0.1996, 'GLN-H2': 0.1996, 'GLN-H3': 0.1996, 'THR-HA': 0.1087,
'LYS-HE3': 0.1171,
'THR-HB': -0.0323, 'TRP-HB2': 0.0222, 'TRP-HB3': 0.0222, 'HIE-CE1': 0.1804, 'GLY-CA': -0.01,
'TYR-CE1': -0.2239,
'TYR-CE2': -0.2239, 'GLY-C': 0.6163, 'GLY-N': 0.2943, 'GLY-O': -0.5722, 'THR-H1': 0.1934,
'THR-H3': 0.1934,
'THR-H2': 0.1934, 'GLN-O': -0.5713, 'GLN-N': 0.1493, 'GLY-H1': 0.1642, 'GLY-H3': 0.1642, 'GLY-H2': 0.1642}
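# Hedged sketch: `nt_charges` appears to hold charge overrides for
# N-terminal residue forms (note the H1/H2/H3 ammonium hydrogens and the
# ACE cap atoms). A lookup that prefers the terminal table and falls back
# to the standard one might look like this; the helper name is
# illustrative, not from the original module.
def lookup_charge(resname, atom, n_terminal=False):
    key = resname + '-' + atom
    if n_terminal and key in nt_charges:
        return nt_charges[key]
    return charges[key]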
unified_charges = {'GLN-CB': 0.030600000000000002, "DA5-C2'": 0.0582, 'RCN-O2': -0.6252,
"RGN-O2'": -0.19529999999999997,
"DCN-O5'": -0.18960000000000005, 'RG5-C2': 0.7657, "RG-O5'": -0.4989, 'RAN-H5T': 0.4295,
'LEU-CA': 0.040400000000000005, "RU-C4'": 0.2239, 'THR-OG1': -0.2659, 'DGN-H5T': 0.4422,
'LYN-CA': 0.027340000000000003, "RG-C5'": 0.1916, "DT-C3'": 0.1698, 'THR-CA': 0.0618,
'THR-CB': 0.36970000000000003,
"RA-C3'": 0.2637, 'LYN-C': 0.5973, 'LYN-CB': 0.019550000000000005, "RAN-C2'": 0.1642,
"RU-O4'": -0.3548,
'RU-P': 1.1662, "RAN-O5'": -0.19279999999999997, 'HID-NE2': -0.5727, 'GLU-CD': 0.8054,
'VAL-CA': 0.009400000000000006,
"DC3-C4'": 0.28049999999999997, 'GLU-CG': -0.0714, "DG-C2'": 0.0582, 'GLU-CA': 0.1502,
'GLU-CB': 0.0214,
'DCN-H5T': 0.4422, "RA3-O5'": -0.4989, "RAN-C5'": 0.1916, 'LEU-C': 0.5973, 'TRP-CE2': 0.138,
'TRP-CE3': -0.06869999999999998, 'DG-O1P': -0.7761, "RG3-O5'": -0.4989, 'SER-O': -0.5679,
'RA-P': 1.1662,
"RC3-O3'": -0.21650000000000003, 'DA-O1P': -0.7761, 'CYM-SG': -0.8844, 'DCN-C2': 0.7959,
"DC5-O4'": -0.3691,
'DCN-C6': 0.211, 'DCN-C5': -0.3359, 'DCN-C4': 0.8439, 'VAL-C': 0.5973, "DG-C5'": 0.14389999999999997,
'DG3-N9': 0.0577,
"RC5-O5'": -0.19279999999999997, 'DG3-N3': -0.6636, 'DG3-N2': -0.07600000000000007,
'DG3-N1': -0.1533, 'TRP-C': 0.5973,
'DG3-N7': -0.5725, 'VAL-N': -0.14380000000000004, 'CYX-CB': 0.103, "DT-C4'": 0.28049999999999997,
'RG3-P': 1.1662,
'DC3-O2P': -0.7761, "DGN-C5'": 0.14389999999999997, 'DT3-O1P': -0.7761, "RCN-O4'": -0.3548,
"DG5-C1'": 0.2104,
'ILE-CB': 0.149, 'HID-CE1': 0.3449, 'ILE-CA': 0.027200000000000002, "DTN-C5'": 0.14389999999999997,
'DC5-N4': -0.11449999999999994, "RUN-O3'": -0.21650000000000003, 'DT5-C2': 0.5677, 'DT5-C5': 0.0025,
'DT5-C4': 0.5194,
'DC5-N3': -0.7748, 'DT-O2': -0.5881, "RUN-C2'": 0.1642, 'LYN-N': -0.14380000000000004,
'RC5-H5T': 0.4295,
'GLU-C': 0.5366, 'DT-O1P': -0.7761, "DAN-C4'": 0.28049999999999997, "RG3-C5'": 0.1916,
'GLU-N': -0.22269999999999995,
"DA5-C3'": 0.1698, "DCN-C1'": 0.1847, "DG3-C1'": 0.2104, "RA5-C5'": 0.1916, "RU3-C5'": 0.1916,
'GLU-OE2': -0.8188,
'DA3-O2P': -0.7761, "RG3-C1'": 0.2197, 'PRO-O': -0.5748, 'PRO-N': -0.2548, "RGN-C1'": 0.2197,
'RGN-N7': -0.5709,
'RGN-N2': -0.09439999999999993, 'RGN-N3': -0.6323, 'RGN-N1': -0.13630000000000003,
"DAN-O4'": -0.3691,
'RG-O1P': -0.776, 'RU5-N3': -0.03949999999999998, 'DG-C5': 0.1991, 'TYR-CA': 0.0862,
'RUN-O2': -0.5477,
'RG-C5': 0.1744, 'TYR-CG': -0.0011, 'RUN-O4': -0.5761, "RG5-O3'": -0.5246, 'RA5-C2': 0.6348,
'RA3-O1P': -0.776,
'DT-C2': 0.5677, "RC5-C3'": 0.2637, "DTN-C2'": 0.0582, 'DT-C5': 0.0025, 'RG3-C5': 0.1744,
"RGN-C3'": 0.2637,
'DGN-O6': -0.5699, "RU5-C3'": 0.2637, "RAN-C1'": 0.24009999999999998, 'PHE-CZ': 0.022500000000000006,
'RG-C8': 0.3014,
'RA5-C4': 0.3053, 'HID-CA': 0.1069, "RC3-O4'": -0.3548, 'RA5-C5': 0.0515,
'THR-CG2': -0.051199999999999996,
'ASP-C': 0.5366, 'LEU-N': -0.14380000000000004, 'LEU-O': -0.5679, 'ASH-O': -0.5679,
'GLH-N': -0.14380000000000004,
'ASP-N': -0.22269999999999995, 'DTN-H5T': 0.4422, 'RC-O2P': -0.776, "RC3-C4'": 0.2239,
'RA3-N9': -0.0251,
'RA3-N7': -0.6073, 'RA3-N6': -0.07890000000000008, "RU5-O2'": -0.19529999999999997, 'RCN-C6': 0.2011,
'RA3-N3': -0.6997, 'RA3-N1': -0.7615, 'RCN-C2': 0.7538, 'RA5-C8': 0.3559,
'ASH-N': -0.14380000000000004,
"DC5-C3'": 0.1698, 'RG5-N9': 0.0492, "DGN-O5'": -0.18960000000000005, 'RG3-O2P': -0.776,
'RA5-C6': 0.7009,
'RG5-N2': -0.09439999999999993, "DA-C2'": 0.0582, "DTN-O3'": -0.21530000000000005, 'RC5-O2': -0.6252,
'ASP-O': -0.5819,
'SER-CA': 0.0594, "DT-O5'": -0.4954, "DT5-C2'": 0.0582, 'RAN-N9': -0.0251,
"DG5-O5'": -0.18960000000000005,
"DC3-C1'": 0.1847, 'RA5-H5T': 0.4295, "DG-C1'": 0.2104, 'RAN-N6': -0.07890000000000008,
'RAN-N7': -0.6073,
'LYS-N': -0.07319999999999999, 'SER-CB': 0.2821, 'LYS-O': -0.5894, "DC-O3'": -0.5232,
"RC-O2'": -0.19529999999999997,
"RU-C1'": 0.24980000000000002, 'RU3-O4': -0.5761, "DT3-C2'": 0.0582, 'RU3-O2': -0.5477,
"RA5-C1'": 0.24009999999999998,
"DA3-C3'": 0.1698, "DT3-O5'": -0.4954, "DG5-C5'": 0.14389999999999997, "RG5-C2'": 0.1642,
"RC-O4'": -0.3548,
'DG5-H5T': 0.4422, 'ASH-C': 0.5973, 'DA5-H5T': 0.4422, "RU5-C4'": 0.2239, "RG5-C5'": 0.1916,
'DC5-C2': 0.7959,
'DAN-C4': 0.38, 'DC3-N4': -0.11449999999999994, 'DC5-C6': 0.211, "RU3-C4'": 0.2239, 'DC5-C4': 0.8439,
'DC5-C5': -0.3359, "RA3-C4'": 0.2239, 'MET-O': -0.5679, "DG-O5'": -0.4954,
"RG3-O2'": -0.19529999999999997,
'DG5-N1': -0.1533, 'DG5-N3': -0.6636, 'DG5-N2': -0.07600000000000007, 'DG5-N7': -0.5725,
'DG5-N9': 0.0577,
"RC-O3'": -0.5246, "RU3-C1'": 0.24980000000000002, 'HIP-CE1': 0.2511,
"DA5-O5'": -0.18960000000000005,
"DC-C2'": 0.0582, 'RUN-C6': 0.10619999999999999, 'RUN-C5': -0.18239999999999998, 'RUN-C4': 0.5952,
'RA3-C8': 0.3559,
'RUN-C2': 0.4687, 'SER-N': -0.14380000000000004, "RA5-O5'": -0.19279999999999997, 'RA3-C4': 0.3053,
'RA3-C5': 0.0515,
'RA3-C6': 0.7009, 'SER-C': 0.5973, 'LEU-CD1': -0.11210000000000003, "RCN-C4'": 0.2239,
'RA3-C2': 0.6348,
'LEU-CD2': -0.11210000000000003, 'DA-C2': 0.6314, 'DA5-N9': -0.0268, 'DA-C6': 0.6897,
'DG3-C6': 0.4918, 'DA-C4': 0.38,
'DA-C5': 0.0725, 'DA5-N3': -0.7417, 'DA-C8': 0.34840000000000004, "DCN-C4'": 0.28049999999999997,
'DA5-N7': -0.6175,
'DA5-N6': -0.07889999999999997, 'RG-P': 1.1662, "DT3-C5'": 0.14389999999999997, 'ALA-O': -0.5679,
'ALA-C': 0.5973,
'ASP-OD2': -0.8014, 'ASP-OD1': -0.8014, "RCN-C3'": 0.2637, 'RG5-N1': -0.13630000000000003,
'HIE-CD2': -0.0345,
"DCN-O4'": -0.3691, 'PHE-CB': 0.0247, 'GLU-OE1': -0.8188, 'MET-SD': -0.2737, "RG5-O4'": -0.3548,
"RC5-C4'": 0.2239,
"RC5-C1'": 0.2095, 'ILE-CG2': -0.055800000000000016, 'LYS-NZ': 0.6346, "RG-C4'": 0.2239,
"RU3-O4'": -0.3548,
"RUN-O2'": -0.19529999999999997, 'ILE-C': 0.5973, "RA-O3'": -0.5246, "RA-C2'": 0.1642,
'ILE-CG1': 0.004200000000000002,
'ILE-N': -0.14380000000000004, 'ILE-O': -0.5679, 'DAN-N3': -0.7417, 'DAN-N1': -0.7624,
'DAN-N6': -0.07889999999999997,
'DAN-N7': -0.6175, "RCN-O3'": -0.21650000000000003, 'RC5-C2': 0.7538, 'DAN-N9': -0.0268,
'RC5-C4': 0.8185,
'RC5-C5': -0.3287, 'RC5-C6': 0.2011, "RAN-O4'": -0.3548, 'DT-O2P': -0.7761, "RU-C3'": 0.2637,
'RG5-C8': 0.3014,
'RG5-C4': 0.1222, "RG-C3'": 0.2637, 'RG5-C6': 0.477, "RC3-C1'": 0.2095, 'GLN-CD': 0.6951,
'RC3-P': 1.1662,
"DC3-C5'": 0.14389999999999997, "RA3-O2'": -0.19529999999999997, 'DT5-C6': 0.039799999999999974,
"RAN-C4'": 0.2239,
"RC-C3'": 0.2637, "DTN-O4'": -0.3691, "DA-C5'": 0.14389999999999997, 'RU3-C2': 0.4687,
'RA3-O2P': -0.776,
'RU3-C4': 0.5952, 'RU3-C5': -0.18239999999999998, 'RU3-C6': 0.10619999999999999, 'RG3-C8': 0.3014,
"RC5-O3'": -0.5246,
"DT5-C5'": 0.14389999999999997, 'RC-N4': -0.10619999999999996, 'RG3-C2': 0.7657, 'RCN-H5T': 0.4295,
'RG3-C6': 0.477,
'RG3-C4': 0.1222, "DC5-O5'": -0.18960000000000005, "DC3-C2'": 0.0582, 'RAN-C2': 0.6348,
"DG-C4'": 0.28049999999999997,
'RAN-C6': 0.7009, 'RAN-C5': 0.0515, 'RAN-C4': 0.3053, "DG3-O4'": -0.3691, 'RAN-C8': 0.3559,
"DT-C5'": 0.14389999999999997, "DC3-O4'": -0.3691, "DGN-C4'": 0.28049999999999997,
'ASN-OD1': -0.5931,
'DAN-C5': 0.0725, 'GLN-CG': 0.0059000000000000025, "RC-C4'": 0.2239, 'DG3-C2': 0.7432,
'DG3-C4': 0.1814,
'DG3-C5': 0.1991, "DG5-C2'": 0.0582, "DC3-O3'": -0.21530000000000005, 'DG3-C8': 0.2733,
"RC5-O4'": -0.3548,
'DA5-C2': 0.6314, 'DAN-H5T': 0.4422, 'LYS-CE': 0.2127, 'ARG-NE': -0.18389999999999995,
'DT-N3': -0.09199999999999997,
'DT-N1': -0.0239, 'DA5-C4': 0.38, "RG-O4'": -0.3548, "RUN-C5'": 0.1916, 'CYX-SG': -0.1081,
'HID-N': -0.14380000000000004, 'TYR-CE1': -0.0685, 'DA5-C6': 0.6897, 'LYS-CD': 0.0763,
'HIP-ND1': 0.2353,
"RA3-C1'": 0.24009999999999998, 'VAL-CG1': -0.08189999999999997, "DT3-C1'": 0.2484,
"RUN-O5'": -0.19279999999999997,
"DT5-O5'": -0.18960000000000005, 'ASN-C': 0.5973, "RU3-C2'": 0.1642, "DC-C1'": 0.1847,
'ILE-CD1': -0.010200000000000008, "RA5-O2'": -0.19529999999999997, 'ASN-O': -0.5679,
'RGN-O6': -0.5597,
'ARG-C': 0.7341, 'ARG-N': -0.07319999999999999, 'ARG-O': -0.5894, "DAN-O3'": -0.21530000000000005,
"RC5-O2'": -0.19529999999999997, 'LYS-CB': 0.063, 'DT5-H5T': 0.4422, 'CYS-CB': 0.09929999999999999,
'CYS-CA': 0.13369999999999999, 'RA-N7': -0.6073, 'RA-N6': -0.07890000000000008, 'DCN-N3': -0.7748,
'RA-N3': -0.6997,
'RA-N1': -0.7615, 'DT3-N1': -0.0239, "DTN-C3'": 0.1698, 'RA-N9': -0.0251, "RGN-C2'": 0.1642,
'DA3-N1': -0.7624,
'DA3-N3': -0.7417, 'DG3-P': 1.1659, 'DA3-N7': -0.6175, 'DA3-N6': -0.07889999999999997,
'DA3-N9': -0.0268,
'PHE-CD2': 0.007400000000000018, 'DC-O2P': -0.7761, "DC5-C2'": 0.0582,
"DCN-O3'": -0.21530000000000005,
'TRP-CZ3': -0.05249999999999999, 'RGN-H5T': 0.4295, 'RA3-P': 1.1662, "RC3-O5'": -0.4989,
'RG-N9': 0.0492,
"DC5-C4'": 0.28049999999999997, "RGN-O5'": -0.19279999999999997, 'PHE-CE2': -0.027400000000000008,
'RU-O2P': -0.776,
'RG-N1': -0.13630000000000003, 'RG-N3': -0.6323, 'RG-N2': -0.09439999999999993, 'RG-N7': -0.5709,
'PHE-CA': 0.0954,
"DA-O5'": -0.4954, 'DT5-N3': -0.09199999999999997, 'PHE-CG': 0.0118, 'DT5-N1': -0.0239,
'DC3-N3': -0.7748,
"RU5-O3'": -0.5246, "RA-C1'": 0.24009999999999998, 'HIE-CG': 0.1868, 'DC3-N1': -0.0339,
'HIP-O': -0.5894,
'HIP-N': -0.07319999999999999, 'RG3-O6': -0.5597, 'HIP-C': 0.7341, "DGN-O4'": -0.3691,
'RUN-H5T': 0.4295,
'HIE-CA': 0.07790000000000001, 'RC5-N3': -0.7584, 'RC5-N1': -0.0484, "DA-C1'": 0.2269,
'ASN-N': -0.14380000000000004,
"RAN-O3'": -0.21650000000000003, 'RC5-N4': -0.10619999999999996, 'ASN-ND2': -0.07990000000000008,
"DA5-O3'": -0.5232,
'MET-N': -0.14380000000000004, "DA3-O4'": -0.3691, "RU5-O4'": -0.3548, 'DAN-C8': 0.34840000000000004,
'TRP-CA': 0.0848,
'TRP-CB': 0.0628, 'VAL-CB': 0.2688, "DT-C1'": 0.2484, 'TRP-CG': -0.1415,
'RU3-N3': -0.03949999999999998,
"RG3-O3'": -0.21650000000000003, 'RU3-N1': 0.0418, 'TYR-O': -0.5679, 'TYR-N': -0.14380000000000004,
'TYR-C': 0.5973,
'VAL-O': -0.5679, "RA5-C2'": 0.1642, "DA3-C4'": 0.28049999999999997, 'RC-P': 1.1662,
"DT3-O4'": -0.3691,
'HID-O': -0.5679, 'RU5-C6': 0.10619999999999999, "RG5-C3'": 0.2637, 'HID-C': 0.5973,
'RCN-C5': -0.3287,
"DT5-C1'": 0.2484, "RC5-C5'": 0.1916, "DC-O5'": -0.4954, "RG5-C1'": 0.2197, "DGN-C3'": 0.1698,
'RC3-O2P': -0.776,
"DAN-C2'": 0.0582, "DA-O4'": -0.3691, 'DT-C4': 0.5194, 'DT-C7': 0.00410000000000002,
'DT-C6': 0.039799999999999974,
"RG3-C2'": 0.1642, 'RU3-P': 1.1662, "RA3-C5'": 0.1916, "DG-O4'": -0.3691, 'RCN-C4': 0.8185,
'DG-N9': 0.0577,
"RGN-C5'": 0.1916, 'DG-N3': -0.6636, 'DG-N2': -0.07600000000000007, 'DG-N1': -0.1533,
'DG-N7': -0.5725,
'HIP-CD2': 0.1176, 'VAL-CG2': -0.08189999999999997, "DC-C5'": 0.14389999999999997,
'DG3-O1P': -0.7761,
'RU3-O1P': -0.776, "DA-O3'": -0.5232, 'TRP-N': -0.14380000000000004, 'TRP-O': -0.5679,
'DA3-P': 1.1659,
"RA3-C2'": 0.1642, "DA5-C5'": 0.14389999999999997, 'DTN-N3': -0.09199999999999997, 'MET-CB': 0.0824,
'DTN-N1': -0.0239,
"DG3-C3'": 0.1698, "DT3-C4'": 0.28049999999999997, 'DC-C4': 0.8439, 'DC-C5': -0.3359, 'DC-C6': 0.211,
'RC-N1': -0.0484,
'RC3-N3': -0.7584, 'DC-C2': 0.7959, 'MET-CE': 0.1516, 'LYS-CG': 0.0393, 'DA-N7': -0.6175,
'DA-N6': -0.07889999999999997, 'DA-N1': -0.7624, 'DA5-C5': 0.0725, 'DA-N3': -0.7417,
'LYS-CA': -0.09739999999999999,
'DA5-C8': 0.34840000000000004, "RCN-C2'": 0.1642, 'DA-N9': -0.0268, 'RC3-O1P': -0.776,
'RG5-H5T': 0.4295,
'DC-P': 1.1659, 'RU5-O2': -0.5477, 'TRP-CZ2': -0.10289999999999999, 'RU5-O4': -0.5761,
'DCN-N1': -0.0339,
"RG5-O5'": -0.19279999999999997, 'RC-N3': -0.7584, 'DG-P': 1.1659, 'DT3-C2': 0.5677,
'GLH-OE1': -0.5838,
'RA-O2P': -0.776, 'DT3-C7': 0.00410000000000002, 'DT3-C6': 0.039799999999999974, 'DT3-C5': 0.0025,
'DT3-C4': 0.5194,
'DCN-N4': -0.11449999999999994, 'TYR-CD1': -0.020699999999999996, "RA-O2'": -0.19529999999999997,
'HID-CD2': 0.2439,
'DTN-O2': -0.5881, "RA-C5'": 0.1916, 'THR-C': 0.5973, 'DTN-O4': -0.5563,
'THR-N': -0.14380000000000004,
'THR-O': -0.5679, 'DG5-C6': 0.4918, 'DG5-C4': 0.1814, 'DG5-C5': 0.1991, 'DG5-C2': 0.7432,
"DA5-C4'": 0.28049999999999997, 'DT-O4': -0.5563, "RG-C2'": 0.1642, 'ALA-CB': -0.0016000000000000042,
'DG5-C8': 0.2733,
'ALA-CA': 0.11599999999999999, 'DC5-N1': -0.0339, "RU5-C1'": 0.24980000000000002, 'DA5-N1': -0.7624,
"RA3-O3'": -0.21650000000000003, 'PRO-CD': 0.09740000000000001, 'PRO-CG': 0.0615,
'PRO-CA': 0.037500000000000006,
'DT5-C7': 0.00410000000000002, "RC-C2'": 0.1642, 'PRO-CB': 0.0436, "DA-C4'": 0.28049999999999997,
'DTN-C5': 0.0025,
'DTN-C6': 0.039799999999999974, 'DTN-C7': 0.00410000000000002, 'DTN-C2': 0.5677, 'RU3-O2P': -0.776,
"DT-O3'": -0.5232,
"DT5-C4'": 0.28049999999999997, 'CYX-CA': 0.1195, 'DC-N4': -0.11449999999999994, 'DC-N3': -0.7748,
'PRO-C': 0.5896,
'DC-N1': -0.0339, 'ALA-N': -0.14380000000000004, "DC3-C3'": 0.1698, 'HIE-N': -0.14380000000000004,
'HIE-O': -0.5679,
'RC3-O2': -0.6252, 'HIE-C': 0.5973, "DG3-O5'": -0.4954, "RA-O5'": -0.4989, 'RA5-N9': -0.0251,
"RU3-O5'": -0.4989,
'RA5-N7': -0.6073, 'RA5-N6': -0.07890000000000008, 'RA5-N1': -0.7615, 'RA5-N3': -0.6997,
"DC5-C1'": 0.1847,
'CYM-CA': 0.0157, 'DC-O1P': -0.7761, "DA3-C1'": 0.2269, "DT3-O3'": -0.21530000000000005,
"RCN-O2'": -0.19529999999999997, 'RC-C2': 0.7538, "DG5-C3'": 0.1698,
'TRP-NE1': -0.0005999999999999894,
'RC-C6': 0.2011, 'RC-C4': 0.8185, 'RC-C5': -0.3287, 'CYM-CB': -0.0169,
'LEU-CB': -0.018800000000000004,
'TRP-CH2': 0.028299999999999992, "RG-O3'": -0.5246, "RU3-O2'": -0.19529999999999997,
'ASP-CB': -0.054700000000000006,
'HIP-CB': 0.12060000000000001, 'HIP-CA': -0.01419999999999999, 'ASP-CA': 0.1261, 'HIP-CG': -0.0012,
'ASP-CG': 0.7994,
'DG3-O2P': -0.7761, 'RU-N3': -0.03949999999999998, 'RU-N1': 0.0418, "DA5-C1'": 0.2269,
"DG-O3'": -0.5232,
"DCN-C3'": 0.1698, 'HIP-NE2': 0.2193, 'RU-O1P': -0.776, 'DT3-P': 1.1659, 'DG-C2': 0.7432,
"RUN-O4'": -0.3548,
'DG-C4': 0.1814, "DT5-O4'": -0.3691, 'DG-C6': 0.4918, "RU3-C3'": 0.2637, 'DG-C8': 0.2733,
'DA-P': 1.1659,
'RU5-C5': -0.18239999999999998, 'RU5-C2': 0.4687, "RA5-O3'": -0.5246, "RU-O3'": -0.5246,
"RC3-C2'": 0.1642,
'LYN-O': -0.5679, "RC-C5'": 0.1916, 'GLU-O': -0.5819, 'GLH-OE2': -0.187, 'DT3-O4': -0.5563,
'DT3-O2': -0.5881,
"DG3-C4'": 0.28049999999999997, 'RU5-H5T': 0.4295, 'ARG-CZ': 0.8076, "RUN-C1'": 0.24980000000000002,
'DC3-P': 1.1659,
'ARG-CG': 0.096, 'ARG-CD': 0.186, 'ARG-CB': 0.06470000000000001, 'ARG-CA': -0.10769999999999999,
'DT-P': 1.1659,
"RU-C2'": 0.1642, 'DCN-O2': -0.6548, 'PHE-CE1': -0.027400000000000008, 'RG5-N7': -0.5709,
'CYS-O': -0.5679,
"RU5-C5'": 0.1916, 'RCN-N3': -0.7584, 'CYS-C': 0.5973, 'RCN-N1': -0.0484,
'RCN-N4': -0.10619999999999996,
'DAN-C6': 0.6897, "RGN-O3'": -0.21650000000000003, 'DGN-C8': 0.2733, 'DGN-C6': 0.4918,
'DGN-C5': 0.1991,
'DGN-C4': 0.1814, 'DGN-C2': 0.7432, "RGN-O4'": -0.3548, "RU-C5'": 0.1916, 'RG-O6': -0.5597,
'TYR-CD2': -0.020699999999999996, 'PHE-CD1': 0.007400000000000018, "DT5-O3'": -0.5232,
'DT5-O4': -0.5563,
'CYX-N': -0.14380000000000004, 'CYX-O': -0.5679, 'DT5-O2': -0.5881, 'CYX-C': 0.5973,
"DT-C2'": 0.0582,
'RG3-N9': 0.0492, 'RG3-N7': -0.5709, 'RG3-N1': -0.13630000000000003, 'TYR-OH': -0.15869999999999995,
'RG3-N3': -0.6323,
'RG3-N2': -0.09439999999999993, 'MET-CA': 0.0643, "RAN-C3'": 0.2637, "RU-O5'": -0.4989,
'MET-CG': 0.08979999999999999,
'RC3-C6': 0.2011, "RAN-O2'": -0.19529999999999997, 'RC3-C4': 0.8185, 'RC3-C5': -0.3287,
'RC3-C2': 0.7538,
"DA3-O5'": -0.4954, 'HID-ND1': -0.016199999999999992, "RU5-O5'": -0.19279999999999997,
"DAN-C3'": 0.1698,
'DTN-C4': 0.5194, "DG-C3'": 0.1698, 'PHE-O': -0.5679, 'PHE-N': -0.14380000000000004,
"RA3-O4'": -0.3548,
"DTN-O5'": -0.18960000000000005, 'PHE-C': 0.5973, 'RG5-N3': -0.6323, "DA3-O3'": -0.21530000000000005,
'DC3-C4': 0.8439,
'DC3-C5': -0.3359, 'DC3-C6': 0.211, "RG3-O4'": -0.3548, 'DC3-C2': 0.7959, 'DAN-C2': 0.6314,
'RA-O1P': -0.776,
"RC3-C5'": 0.1916, "RA5-C3'": 0.2637, "DA3-C5'": 0.14389999999999997, 'TRP-CD1': 0.04239999999999999,
"DC5-O3'": -0.5232, 'TRP-CD2': 0.1243, 'DA3-C6': 0.6897, 'DA3-C4': 0.38, 'DA3-C5': 0.0725,
'DA3-C2': 0.6314,
"DC-O4'": -0.3691, 'LEU-CG': 0.317, 'DA3-C8': 0.34840000000000004, "DGN-C2'": 0.0582,
"DAN-C1'": 0.2269,
'DG3-O6': -0.5699, "DG3-C2'": 0.0582, "DC3-O5'": -0.4954, "DTN-C4'": 0.28049999999999997,
'GLN-OE1': -0.6086,
'DC5-O2': -0.6548, "DC-C4'": 0.28049999999999997, 'CYM-O': -0.5679, 'RA-C4': 0.3053, 'RA-C5': 0.0515,
'RA-C6': 0.7009,
'RA-C2': 0.6348, 'HIE-NE2': 0.05439999999999995, "RG3-C4'": 0.2239, "RA3-C3'": 0.2637,
'RA-C8': 0.3559,
'LYN-CE': 0.25888, 'LYN-CD': -0.014580000000000001, 'LYN-CG': 0.08694, 'RGN-C2': 0.7657,
'RGN-C5': 0.1744,
'RGN-C4': 0.1222, 'RGN-C6': 0.477, "RA5-C4'": 0.2239, 'RGN-C8': 0.3014, 'RGN-N9': 0.0492,
'RC-O2': -0.6252,
"RCN-O5'": -0.19279999999999997, 'GLH-CB': 0.0441, 'GLH-CA': 0.0924, 'GLH-CG': 0.0686,
"RCN-C1'": 0.2095,
'GLH-CD': 0.6801, 'TYR-CB': 0.0438, 'DA-O2P': -0.7761, "DAN-O5'": -0.18960000000000005,
'RG-C6': 0.477,
'RG-C4': 0.1222, "DC-C3'": 0.1698, 'RG-C2': 0.7657, 'RU5-C4': 0.5952, "RU-O2'": -0.19529999999999997,
'RUN-N1': 0.0418,
'GLH-O': -0.5679, 'RUN-N3': -0.03949999999999998, 'GLH-C': 0.5973, "RG5-O2'": -0.19529999999999997,
"RG3-C3'": 0.2637,
'DG-O6': -0.5699, "RC5-C2'": 0.1642, 'DGN-N9': 0.0577, 'SER-OG': -0.22709999999999997,
'RC-O1P': -0.776,
"RGN-C4'": 0.2239, 'DGN-N7': -0.5725, 'DGN-N1': -0.1533, 'DGN-N2': -0.07600000000000007,
'DGN-N3': -0.6636,
'GLN-NE2': -0.09050000000000008, 'DT3-N3': -0.09199999999999997, "RA-C4'": 0.2239, 'ASN-CG': 0.713,
'ASN-CA': 0.11910000000000001, 'ASN-CB': -0.04470000000000002, "DGN-O3'": -0.21530000000000005,
'CYS-SG': -0.11860000000000001, 'GLY-N': -0.14380000000000004, "RG-O2'": -0.19529999999999997,
'ARG-NH2': 0.03289999999999993, "RC3-O2'": -0.19529999999999997, 'ARG-NH1': 0.03289999999999993,
"RG-C1'": 0.2197,
"RC3-C3'": 0.2637, "DG5-O3'": -0.5232, "RU5-C2'": 0.1642, 'GLN-C': 0.5973,
"DAN-C5'": 0.14389999999999997,
'RG5-O6': -0.5597, 'MET-C': 0.5973, 'DC3-O2': -0.6548, "RC-C1'": 0.2095, "DA-C3'": 0.1698,
'HIE-CB': 0.066,
'RG-O2P': -0.776, 'LYS-C': 0.7341, 'DA3-O1P': -0.7761, 'DC-O2': -0.6548,
"DG3-O3'": -0.21530000000000005,
"DT5-C3'": 0.1698, "DT-O4'": -0.3691, "DG5-O4'": -0.3691, "DC5-C5'": 0.14389999999999997,
'TYR-CZ': 0.3226,
'RC3-N1': -0.0484, "DA5-O4'": -0.3691, 'HIE-CE1': 0.307, 'RC3-N4': -0.10619999999999996,
"RA-O4'": -0.3548,
'DC5-H5T': 0.4422, 'ASH-OD2': -0.16289999999999993, 'ASH-OD1': -0.5554, "DT3-C3'": 0.1698,
'CYS-N': -0.14380000000000004, "RUN-C3'": 0.2637, "DA3-C2'": 0.0582, 'LYN-NZ': -0.26372999999999996,
"DG5-C4'": 0.28049999999999997, "RUN-C4'": 0.2239, "RC-O5'": -0.4989, 'RU-C2': 0.4687,
'RAN-N1': -0.7615,
"RU3-O3'": -0.21650000000000003, "DGN-C1'": 0.2104, 'RU-C4': 0.5952, "RG5-C4'": 0.2239,
'RU-C5': -0.18239999999999998,
'GLY-CA': 0.1144, 'RU-C6': 0.10619999999999999, 'RU-O4': -0.5761, "DCN-C2'": 0.0582,
'RU-O2': -0.5477,
"DTN-C1'": 0.2484, 'DG5-O6': -0.5699, 'CYM-N': -0.14380000000000004, 'CYM-C': 0.5973,
'HIE-ND1': -0.5432,
'RG3-O1P': -0.776, 'DG-O2P': -0.7761, 'RU5-N1': 0.0418, 'TYR-CE2': -0.0685, "RA5-O4'": -0.3548,
'GLY-C': 0.5973,
"RCN-C5'": 0.1916, 'GLY-O': -0.5679, 'RAN-N3': -0.6997, 'ASH-CB': 0.066, 'HID-CG': -0.0266,
'ASH-CA': 0.1205,
'ASH-CG': 0.6462, "DCN-C5'": 0.14389999999999997, "DG3-C5'": 0.14389999999999997, 'GLN-O': -0.5679,
'GLN-N': -0.14380000000000004, 'GLN-CA': 0.0819, 'RG5-C5': 0.1744, 'DC3-O1P': -0.7761,
'DT3-O2P': -0.7761,
'HID-CB': 0.0342}
amber_types = {'DC3-H3T': 'HO', "RGN-O2'": 'OH', 'RAN-H5T': 'HO', "RU-C4'": 'CT', "DT-C3'": 'CT', "RAN-C2'": 'CT',
"RU-O4'": 'OS',
"DTN-H2'1": 'HC', "DTN-H2'2": 'HC', 'LYS-HB2': 'HC', "DG3-H2'1": 'HC', "DG3-H2'2": 'HC',
'ILE-HG23': 'HC',
'HID-NE2': 'NB', 'GLU-CD': 'C', 'GLU-CG': 'CT', 'GLU-CA': 'CT', 'GLU-CB': 'CT', "DC3-H3'": 'H1',
'DT-H3': 'H',
'DG-O1P': 'O2', "DA5-H1'": 'H2', "RC5-O2'": 'OH', 'DCN-C2': 'C', 'DCN-C6': 'CM', 'DCN-C5': 'CM',
'DCN-C4': 'CA',
'CYX-CB': 'CT', 'HIE-ND1': 'NB', 'CYX-CA': 'CT', 'RU3-C2': 'C', "RCN-O4'": 'OS', 'SER-HB2': 'H1',
'SER-HB3': 'H1',
'DC5-N4': 'N2', 'DC5-N1': 'N*', 'DC5-N3': 'NC', 'RC5-H5T': 'HO', "DCN-C1'": 'CT', 'GLU-HG2': 'HC',
"RA5-C5'": 'CT',
'RGN-N7': 'NB', 'RGN-N2': 'N2', 'RGN-N3': 'NC', 'RGN-N1': 'NA', 'RGN-N9': 'N*', 'RUN-O2': 'O',
'DT5-H6': 'H4',
'DT5-H3': 'H', "RG5-O3'": 'OS', 'RA3-O1P': 'O2', "DTN-C2'": 'CT', 'DTN-H5T': 'HO', 'TYR-HE2': 'HA',
'TYR-HE1': 'HA',
'RCN-C5': 'CM', 'RCN-C4': 'CA', 'RCN-C6': 'CM', 'RCN-C2': 'C', 'CYM-HB3': 'H1', 'CYM-HB2': 'H1',
"RA3-HO'2": 'HO',
"DC3-C1'": 'CT', "DT5-H2'1": 'HC', "DT5-H2'2": 'HC', 'RU3-O4': 'O', "DT3-C2'": 'CT', 'RU3-O2': 'O',
'VAL-HA': 'H1',
'VAL-HB': 'HC', 'PRO-HA': 'H1', 'DG5-H5T': 'HO', "RG5-C5'": 'CT', "DA-O5'": 'OS', "RG3-C1'": 'CT',
"RU-HO'2": 'HO',
'DG5-N1': 'NA', 'DG5-N3': 'NC', 'DG5-N2': 'N2', 'DG5-N7': 'NB', 'DG5-N9': 'N*', "RUN-C1'": 'CT',
"RA5-O5'": 'OH',
"RCN-C4'": 'CT', 'DA5-N9': 'N*', 'DG3-C6': 'C', 'DA5-N3': 'NC', 'DA5-N1': 'NC', 'DA5-N7': 'NB',
'DA5-N6': 'N2',
'RG-P': 'P', 'ILE-HG21': 'HC', 'ILE-HG22': 'HC', "RC3-H5'1": 'H1', "RC3-H5'2": 'H1', 'DA-H2': 'H5',
'DA-H8': 'H5',
'RAN-H61': 'H', 'RAN-H62': 'H', 'DAN-N3': 'NC', 'DAN-N1': 'NC', 'DAN-N6': 'N2', 'DAN-N7': 'NB',
"RC3-H1'": 'H2',
'LYS-HE2': 'HP', 'RU-C2': 'C', 'DAN-N9': 'N*', 'RU-C4': 'C', 'RU-C5': 'CM', 'RU-C6': 'CM',
'RC5-H42': 'H',
'RC5-H41': 'H', "DA-C5'": 'CT', "RC5-O3'": 'OS', "RC5-H4'": 'H1', "RA-H1'": 'H2', "RU3-O4'": 'OS',
'MET-HE1': 'H1',
"DG-H1'": 'H2', 'MET-HE2': 'H1', "RCN-O3'": 'OH', 'ARG-NE': 'N2', 'DT-H6': 'H4', 'DCN-H42': 'H',
"DA5-H2'2": 'HC',
"DC-C1'": 'CT', "RAN-H2'1": 'H1', 'RGN-O6': 'O', "RU3-H5'2": 'H1', "RU3-H5'1": 'H1', "DT3-C1'": 'CT',
'RA-N7': 'NB',
'DCN-N1': 'N*', 'DCN-N3': 'NC', 'DCN-N4': 'N2', 'RA-N1': 'NC', "DTN-C3'": 'CT', 'RA-N9': 'N*',
'LEU-CG': 'CT',
'LEU-CA': 'CT', 'LEU-CB': 'CT', 'HIP-HB3': 'HC', 'HIP-HB2': 'HC', "DTN-H4'": 'H1', 'RG3-O6': 'O',
'ASN-ND2': 'N',
"DT-H4'": 'H1', "DC5-H1'": 'H2', "DG5-H4'": 'H1', 'RU3-N3': 'NA', 'RU3-N1': 'N*', "DGN-C3'": 'CT',
"DA-O4'": 'OS',
"RG3-C2'": 'CT', 'RU3-P': 'P', 'THR-HG23': 'HC', 'THR-HG21': 'HC', 'LYS-C': 'C', 'LYS-N': 'N',
'LYS-O': 'O',
'LYS-H': 'H', 'DG3-O1P': 'O2', "DAN-H1'": 'H2', 'DA5-C2': 'CQ', 'DA5-C4': 'CB', 'DA5-C5': 'CB',
'DA5-C6': 'CA',
'DA5-C8': 'CK', 'RC3-O1P': 'O2', 'ASP-HB2': 'HC', 'ASP-HB3': 'HC', 'RC-N3': 'NC', "DC3-H1'": 'H2',
'RG5-H5T': 'HO',
'LYS-HD2': 'HC', 'LYS-HD3': 'HC', 'DG5-C6': 'C', 'DG5-C4': 'CB', 'CYM-CB': 'CT', 'DG5-C2': 'CA',
'DG5-C8': 'CK',
'DT5-C5': 'CM', 'DTN-C4': 'C', 'DTN-C5': 'CM', 'DTN-C6': 'CM', 'DT5-C6': 'CM', 'DTN-C2': 'C',
"RU-H5'2": 'H1',
"RU-H5'1": 'H1', 'DG-H21': 'H', 'DG-H22': 'H', "DA3-H1'": 'H2', 'ILE-HD12': 'HC', 'ILE-HD13': 'HC',
'ILE-HD11': 'HC',
"RU5-H1'": 'H2', "DG-C3'": 'CT', 'RA5-N9': 'N*', "RU3-O5'": 'OS', 'RA5-N7': 'NB', 'RA5-N6': 'N2',
'RA5-N1': 'NC',
'RA5-N3': 'NC', "RCN-O2'": 'OH', "RC-H5'1": 'H1', "RC-H5'2": 'H1', 'HIE-CD2': 'CW', 'HIP-CB': 'CT',
'HIP-CA': 'CT',
'HIP-CG': 'CC', 'RU-N3': 'NA', 'RU-N1': 'N*', "DG-O3'": 'OS', "DCN-C3'": 'CT', 'DA-P': 'P',
'DTN-H3T': 'HO',
"DG3-C4'": 'CT', "RU-H3'": 'H1', 'ARG-CZ': 'CA', 'ARG-CG': 'CT', "DA-H3'": 'H1', 'ARG-CB': 'CT',
'ARG-CA': 'CT',
'GLU-HG3': 'HC', 'DCN-O2': 'O', "DCN-H4'": 'H1', "DA5-H2'1": 'HC', 'DCN-H41': 'H', 'TRP-HZ3': 'HA',
"DTN-H3'": 'H1',
"DG5-H5'1": 'H1', "DG5-H5'2": 'H1', 'RG3-N9': 'N*', 'LYN-HZ3': 'H', 'LYN-HZ2': 'H', 'RG3-N7': 'NB',
'RG3-N1': 'NA',
'RG3-N3': 'NC', 'RG3-N2': 'N2', "DC3-H2'1": 'HC', "DC3-H2'2": 'HC', 'TRP-CD2': 'CB', 'DT-H71': 'HC',
'DT-H72': 'HC',
'DT-H73': 'HC', "DA3-O3'": 'OH', 'RGN-H21': 'H', 'ASP-HA': 'H1', 'ARG-HB2': 'HC', 'ARG-HB3': 'HC',
"RU3-HO'2": 'HO',
"DC5-O3'": 'OS', 'DT3-H72': 'HC', 'DT3-H73': 'HC', "RCN-HO'2": 'HO', 'DT3-H71': 'HC', 'DG3-O6': 'O',
"DGN-C2'": 'CT',
'TRP-HH2': 'HA', "RG3-C3'": 'CT', "RC-C5'": 'CT', "RUN-C3'": 'CT', 'VAL-HG23': 'HC', 'VAL-HG22': 'HC',
'VAL-HG21': 'HC', 'RG-C8': 'CK', 'RG-C6': 'C', 'RG-C4': 'CB', 'RG-C5': 'CB', 'RG-C2': 'CA', 'GLH-H': 'H',
'GLH-O': 'O',
'GLH-N': 'N', 'GLH-C': 'C', 'RUN-O4': 'O', "RU-C1'": 'CT', "RU3-C4'": 'CT', 'GLN-NE2': 'N',
'LYS-HG3': 'HC',
'LYS-HG2': 'HC', 'HIE-HE1': 'H5', 'HIE-HE2': 'H', 'GLN-HA': 'H1', 'ILE-HG13': 'HC', 'ILE-HG12': 'HC',
"RC3-H3'": 'H1',
"RC3-O2'": 'OH', 'LYS-HZ1': 'H', 'GLN-HB3': 'HC', 'GLN-HB2': 'HC', 'DA5-H61': 'H', "DAN-C5'": 'CT',
"DG-H3'": 'H1',
'HIE-CG': 'CC', 'HIE-CA': 'CT', 'HIE-CB': 'CT', 'DA3-O1P': 'O2', 'RC3-O2P': 'O2', "RA-H3'": 'H1',
'LEU-HD11': 'HC',
'LEU-HD12': 'HC', 'LEU-HD13': 'HC', 'RC3-H3T': 'HO', "RG3-H2'1": 'H1', "RU3-H3'": 'H1', "DAN-H2'2": 'HC',
"RG3-HO'2": 'HO', 'HIE-CE1': 'CR', 'TYR-HB2': 'HC', 'DC-H5': 'HA', 'DC-H6': 'H4', "RCN-H5'2": 'H1',
"RCN-H5'1": 'H1',
'RU-O4': 'O', "DCN-C2'": 'CT', 'RU-O2': 'O', "DTN-C1'": 'CT', 'CYM-O': 'O', 'CYM-N': 'N', 'DG5-C5': 'CB',
'CYM-C': 'C',
'DG-O2P': 'O2', 'RAN-H2': 'H5', 'GLY-C': 'C', 'GLY-N': 'N', 'GLY-O': 'O', 'GLY-H': 'H', 'ASH-CB': 'CT',
'ASH-CA': 'CT',
'ASH-CG': 'C', "DG3-C5'": 'CT', 'DCN-H6': 'H4', 'DCN-H5': 'HA', "RG-O5'": 'OS', 'ASH-HB2': 'HC',
'ASH-HB3': 'HC',
"RG-C5'": 'CT', "RG3-H4'": 'H1', 'THR-CA': 'CT', 'THR-CB': 'CT', "RCN-H4'": 'H1', 'ASH-HA': 'H1',
'RCN-H41': 'H',
"DC3-C4'": 'CT', 'RCN-H42': 'H', 'DCN-H5T': 'HO', "DC-H2'1": 'HC', "DC-H2'2": 'HC', "RC-C4'": 'CT',
"RA5-HO'2": 'HO',
'CYM-SG': 'SH', "DC5-O4'": 'OS', 'VAL-C': 'C', "DG-C5'": 'CT', 'DG3-N9': 'N*', 'DG3-N3': 'NC',
'DG3-N2': 'N2',
'DG3-N1': 'NA', 'VAL-H': 'H', 'DG3-N7': 'NB', 'VAL-N': 'N', "DGN-C5'": 'CT', 'DT3-O1P': 'O2',
"DG5-C1'": 'CT',
"RUN-C2'": 'CT', 'GLU-C': 'C', 'DT-O1P': 'O2', 'GLU-H': 'H', 'GLU-N': 'N', 'GLU-O': 'O', "RU3-C5'": 'CT',
'DA3-O2P': 'O2', 'DGN-H8': 'H5', 'DGN-H1': 'H', "DAN-O4'": 'OS', "RA3-H5'2": 'H1', "RA3-H5'1": 'H1',
'DT-C2': 'C',
'CYX-HB2': 'H1', 'CYX-HB3': 'H1', "RAN-H1'": 'H2', "RC3-O3'": 'OH', "RA3-C5'": 'CT', 'VAL-HG12': 'HC',
'VAL-HG13': 'HC', "RU5-O2'": 'OH', 'VAL-HG11': 'HC', "DAN-C4'": 'CT', "DGN-O5'": 'OH', "DA-C2'": 'CT',
'RC5-O2': 'O',
"DA3-H3'": 'H1', "RA-H4'": 'H1', "RU5-H3'": 'H1', 'HIE-HD2': 'H4', "DT-O5'": 'OS', "RC-HO'2": 'HO',
"RA5-C1'": 'CT',
'DA3-H3T': 'HO', "RC-H3'": 'H1', "RC-O4'": 'OS', 'HID-HB3': 'HC', 'HID-HB2': 'HC', 'DA5-H5T': 'HO',
'RC-H5': 'HA',
"DC5-C4'": 'CT', 'DAN-C4': 'CB', 'DAN-C6': 'CA', 'DAN-C2': 'CQ', 'MET-N': 'N', 'MET-O': 'O',
'RU-H5': 'HA',
"DG-O5'": 'OS', 'RU-H6': 'H4', 'RA5-H5T': 'HO', "DC-C2'": 'CT', 'RUN-C6': 'CM', 'RUN-C5': 'CM',
'RUN-C4': 'C',
'RUN-C2': 'C', 'LEU-CD1': 'CT', 'RA3-H61': 'H', 'RA3-H62': 'H', 'LEU-CD2': 'CT', 'DTN-O2': 'O',
'DTN-O4': 'O',
"DG3-C2'": 'CT', 'ALA-N': 'N', 'ALA-O': 'O', 'ALA-H': 'H', "DA-H1'": 'H2', 'ALA-C': 'C', 'RU5-H6': 'H4',
'RU5-H5': 'HA', 'RU5-H3': 'H', "DTN-H1'": 'H2', 'DA-H62': 'H', "RG-C4'": 'CT', "RA-O3'": 'OS',
'THR-HG1': 'HO',
'DTN-N1': 'N*', 'DT-O2P': 'O2', "DT-H1'": 'H2', 'RC3-P': 'P', "DC3-C5'": 'CT', "RC-C3'": 'CT',
"DTN-O4'": 'OS',
'ARG-HD2': 'H1', 'ARG-HD3': 'H1', 'RU3-C4': 'C', 'RU3-C5': 'CM', 'RU3-C6': 'CM', "DT3-H1'": 'H2',
"DT5-C5'": 'CT',
'DC-C5': 'CM', 'RCN-H5T': 'HO', "DC5-O5'": 'OH', "DG-C4'": 'CT', "DG3-O4'": 'OS', 'DC-C2': 'C',
"DGN-C4'": 'CT',
'DG3-C2': 'CA', 'DG3-C4': 'CB', 'DG3-C5': 'CB', "DG5-C2'": 'CT', 'DG3-C8': 'CK', 'LYS-CE': 'CT',
"RUN-C5'": 'CT',
"RUN-O5'": 'OH', "DT5-O5'": 'OH', 'ASN-C': 'C', "RU3-C2'": 'CT', 'ASN-H': 'H', 'ASN-N': 'N',
'ASN-O': 'O',
"DAN-O3'": 'OH', "RA5-H5'1": 'H1', 'DA3-P': 'P', "RA5-H5'2": 'H1', 'DC3-O2': 'O', "DCN-H2'2": 'HC',
"DCN-H2'1": 'HC',
"DA3-H5'1": 'H1', "RU-C3'": 'CT', 'DG3-P': 'P', 'DA5-H62': 'H', "RG-H2'1": 'H1', "RGN-O5'": 'OH',
'PHE-CZ': 'CA',
'PHE-CA': 'CT', 'PHE-CB': 'CT', 'RGN-H22': 'H', 'PHE-CG': 'CA', "RU5-O3'": 'OS', "RU-H4'": 'H1',
"DGN-O4'": 'OS',
"DA-C3'": 'CT', 'RC5-N3': 'NC', 'RC5-N1': 'N*', "DA-C1'": 'CT', "RAN-O3'": 'OH', 'RC5-N4': 'N2',
"DA5-O3'": 'OS',
"DA5-H4'": 'H1', 'TRP-CA': 'CT', 'TRP-CB': 'CT', 'TRP-CG': 'C*', "RA5-C2'": 'CT', 'HIP-HA': 'H1',
'RC-P': 'P',
"RU3-H1'": 'H2', 'DC-O2': 'O', "DC-O5'": 'OS', "DC5-C5'": 'CT', 'DT-C5': 'CM', 'DT-C4': 'C',
'DT-C7': 'CT',
'DT-C6': 'CM', 'GLU-HB2': 'HC', 'GLU-HB3': 'HC', "DG-O4'": 'OS', 'DT3-C5': 'CM', 'VAL-CG2': 'CT',
"DC-C5'": 'CT',
"RA3-C2'": 'CT', 'DA-H61': 'H', 'DTN-N3': 'NA', 'ILE-CD1': 'CT', "DG3-C3'": 'CT', "DA5-O4'": 'OS',
"DC3-O4'": 'OS',
'RU5-O2': 'O', 'RU5-O4': 'O', 'DT3-C2': 'C', 'RA-O2P': 'O2', 'DT3-C7': 'CT', 'DT3-C6': 'CM',
"RGN-C5'": 'CT',
'DT3-C4': 'C', "RA-O2'": 'OH', 'THR-C': 'C', 'THR-H': 'H', 'THR-N': 'N', 'THR-O': 'O', 'DA-C4': 'CB',
"DC3-H5'1": 'H1',
'PRO-CD': 'CT', 'PRO-CG': 'CT', 'PRO-CA': 'CT', "RC-C2'": 'CT', 'PRO-CB': 'CT', "DTN-O5'": 'OH',
'PHE-HB3': 'HC',
"DC-H5'2": 'H1', "DC-H5'1": 'H1', "DT-O3'": 'OS', "DT5-C4'": 'CT', "DA3-C2'": 'CT', "DG3-O5'": 'OS',
"DAN-H5'2": 'H1',
"DT3-O3'": 'OH', "DG5-C3'": 'CT', 'TRP-CH2': 'CA', "RU5-H2'1": 'H1', "RG-O3'": 'OS', "DAN-H2'1": 'HC',
'ASP-CB': 'CT',
'ASP-CA': 'CT', 'ASP-CG': 'C', 'LYN-HG2': 'HC', 'LYN-HG3': 'HC', 'ARG-HG3': 'HC', 'ARG-HG2': 'HC',
'DG-C2': 'CA',
"RUN-O4'": 'OS', 'DG-C4': 'CB', "DT5-O4'": 'OS', 'DG-C6': 'C', "RU3-C3'": 'CT', 'DG-C8': 'CK',
"RA-H5'2": 'H1',
"RA-H5'1": 'H1', 'RGN-H3T': 'HO', 'CYS-HB3': 'H1', 'CYS-HB2': 'H1', "RC-O5'": 'OS', 'DT-P': 'P',
"RU-C2'": 'CT',
'GLH-HE2': 'HO', "RGN-H3'": 'H1', "RGN-O4'": 'OS', 'GLN-HG2': 'HC', 'CYX-H': 'H', 'CYX-N': 'N',
'CYX-O': 'O',
'CYX-C': 'C', 'PRO-HG2': 'HC', 'PRO-HG3': 'HC', "RAN-O2'": 'OH', 'LEU-HD21': 'HC', 'LEU-HD23': 'HC',
'LEU-HD22': 'HC',
"RAN-H3'": 'H1', 'RA-H62': 'H', "RC3-H4'": 'H1', 'RC3-H41': 'H', 'HIE-HB2': 'HC', 'HIE-HB3': 'HC',
'DC3-C4': 'CA',
'DC3-C5': 'CM', 'DC3-C6': 'CM', 'DC5-H6': 'H4', 'DC5-H5': 'HA', 'RU5-H5T': 'HO', "RA5-C3'": 'CT',
'RAN-H3T': 'HO',
"RC5-O4'": 'OS', 'HID-HD2': 'H4', 'HID-HD1': 'H', 'MET-HA': 'H1', 'PHE-HB2': 'HC', "DC3-O5'": 'OS',
'RGN-H1': 'H',
"DTN-C4'": 'CT', 'RGN-H8': 'H5', "RA3-H4'": 'H1', "DC-C4'": 'CT', "RC-H1'": 'H2', 'RA-C5': 'CB',
'RA-C6': 'CA',
'RA-C2': 'CQ', "RA3-C3'": 'CT', 'RA-C8': 'CK', 'GLH-CB': 'CT', 'GLH-CA': 'CT', 'GLH-CG': 'CT',
'GLH-CD': 'C',
"DC3-C3'": 'CT', 'RU5-N1': 'N*', 'RU5-N3': 'NA', "DC-C3'": 'CT', "RGN-C4'": 'CT', "RG3-H5'1": 'H1',
"RG3-H5'2": 'H1',
"RC5-HO'2": 'HO', "RG-O2'": 'OH', 'LEU-HA': 'H1', 'ARG-NH1': 'N2', 'LEU-HG': 'HC', "RCN-H3'": 'H1',
"DT-H3'": 'H1',
"RC-C1'": 'CT', 'RG-O2P': 'O2', 'RU3-H3T': 'HO', "DT3-H3'": 'H1', 'RCN-H3T': 'HO', "DT-O4'": 'OS',
'ASN-HA': 'H1',
'RU-O1P': 'O2', "DGN-H1'": 'H2', "DG3-H3'": 'H1', "DG5-C4'": 'CT', 'LYS-HE3': 'HP', "RUN-C4'": 'CT',
'TRP-HB2': 'HC',
'TRP-HB3': 'HC', "DC5-H3'": 'H1', 'LYN-HD3': 'HC', 'LYN-HD2': 'HC', 'GLY-CA': 'CT', 'RG3-O1P': 'O2',
'GLN-C': 'C',
'RA-O1P': 'O2', 'GLN-H': 'H', 'GLN-O': 'O', 'GLN-N': 'N', 'GLN-CA': 'CT', 'RG5-C5': 'CB',
'LEU-HB2': 'HC',
'DG3-H3T': 'HO', 'DT3-O2P': 'O2', "RC3-C1'": 'CT', 'GLN-CG': 'CT', 'ALA-HB2': 'HC', 'ALA-HB3': 'HC',
'ALA-HB1': 'HC',
'PRO-HD3': 'H1', 'PRO-HD2': 'H1', 'GLU-HA': 'H1', 'RU-P': 'P', "RAN-O5'": 'OH', "DA3-H4'": 'H1',
'VAL-CA': 'CT',
"RU5-H4'": 'H1', "RAN-C5'": 'CT', 'VAL-CB': 'CT', 'RA-P': 'P', "RC5-O5'": 'OH', 'RA3-H3T': 'HO',
'HID-HE1': 'H5',
"DC5-H2'2": 'HC', "DC5-H2'1": 'HC', "DT-C4'": 'CT', 'DC3-O2P': 'O2', "DA-H4'": 'H1', "RC5-H5'2": 'H1',
"RC5-H5'1": 'H1', 'HID-CE1': 'CR', "DTN-C5'": 'CT', 'DT-O4': 'O', 'DT5-C2': 'C', "RA3-H3'": 'H1',
'DT5-C4': 'C',
'DT5-C7': 'CT', 'DT-O2': 'O', 'DTN-H3': 'H', 'DTN-H6': 'H4', "DG3-C1'": 'CT', 'GLU-OE2': 'O2',
'GLU-OE1': 'O2',
"RUN-H5'1": 'H1', "RUN-H5'2": 'H1', "DT5-H3'": 'H1', "DCN-H1'": 'H2', 'RG-O1P': 'O2', 'ALA-HA': 'H1',
'DG-C5': 'CB',
'ILE-HB': 'HC', 'ILE-HA': 'H1', "RGN-C3'": 'CT', 'TYR-HB3': 'HC', 'RCN-H5': 'HA', 'RCN-H6': 'H4',
'HID-CG': 'CC',
'HID-CB': 'CT', 'HID-CA': 'CT', 'ASN-HB2': 'HC', 'ASN-HB3': 'HC', 'RUN-H6': 'H4', 'RUN-H5': 'HA',
'RUN-H3': 'H',
"DG-H5'2": 'H1', 'RC3-H5': 'HA', 'RG3-O2P': 'O2', "DTN-O3'": 'OH', 'SER-CA': 'CT', 'SER-CB': 'CT',
"DT5-C2'": 'CT',
"DG5-O5'": 'OH', 'DGN-H21': 'H', 'DGN-H22': 'H', "DG-C1'": 'CT', "RG-H4'": 'H1', "DT3-O5'": 'OS',
"DG5-C5'": 'CT',
'LYN-HE2': 'HP', 'LYN-HE3': 'HP', "RG5-C2'": 'CT', 'DG3-H21': 'H', 'DG3-H22': 'H', 'MET-HE3': 'H1',
"RG3-O2'": 'OH',
"DT5-H1'": 'H2', "RU3-C1'": 'CT', 'VAL-CG1': 'CT', 'RA3-C8': 'CK', 'RA3-C4': 'CB', 'RA3-C5': 'CB',
'RA3-C6': 'CA',
'RA3-C2': 'CQ', "RG5-HO'2": 'HO', 'DA-C2': 'CQ', 'DC3-P': 'P', 'DA-C6': 'CA', "DA5-C4'": 'CT',
'DA-C5': 'CB',
"DC3-H5'2": 'H1', 'DA-C8': 'CK', "DT3-C5'": 'CT', "RCN-C3'": 'CT', "RC3-H2'1": 'H1', "RG5-H5'2": 'H1',
"RG5-H5'1": 'H1', 'GLN-HG3': 'HC', "RGN-H1'": 'H2', "RC5-C4'": 'CT', 'DT-N1': 'N*', 'RC5-C2': 'C',
'RC5-C4': 'CA',
'RC5-C5': 'CM', 'RC5-C6': 'CM', "RAN-O4'": 'OS', 'RA5-H8': 'H5', 'RG5-C8': 'CK', 'RG5-C4': 'CB',
'RA5-H2': 'H5',
'RG5-C6': 'C', 'GLN-CB': 'CT', 'GLN-CD': 'C', 'RG5-C2': 'CA', "RUN-H1'": 'H2', 'GLH-HG2': 'HC',
'GLH-HG3': 'HC',
"RG5-H4'": 'H1', "RAN-C4'": 'CT', "RG3-H1'": 'H2', 'RAN-C2': 'CQ', 'RAN-C6': 'CA', 'RAN-C5': 'CB',
'RAN-C4': 'CB',
'HIE-HA': 'H1', 'RC3-H6': 'H4', "DG-H5'1": 'H1', 'RAN-C8': 'CK', "RGN-H5'2": 'H1', 'LYS-HB3': 'HC',
"RGN-H5'1": 'H1',
"DT-C5'": 'CT', 'LEU-HB3': 'HC', 'ASN-OD1': 'O', "RC5-H3'": 'H1', "DC3-O3'": 'OH', 'LYN-HA': 'H1',
'HID-CD2': 'CV',
'ILE-CG2': 'CT', 'DT-N3': 'NA', 'ILE-CG1': 'CT', "DC-H1'": 'H2', "DCN-H5'1": 'H1', "DCN-H5'2": 'H1',
"RA3-C1'": 'CT',
"RG-HO'2": 'HO', 'ASN-HD21': 'H', 'CYS-CB': 'CT', 'CYS-CA': 'CT', 'ASN-HD22': 'H', 'DT3-N1': 'N*',
'DT3-N3': 'NA',
"RGN-C2'": 'CT', "DCN-O3'": 'OH', 'PHE-CE1': 'CA', 'PHE-CE2': 'CA', 'DGN-H3T': 'HO', "RCN-H1'": 'H2',
'DT5-N1': 'N*',
"DGN-H3'": 'H1', "RU-O3'": 'OS', 'RG3-H8': 'H5', 'RG3-H1': 'H', "RG3-O3'": 'OH', 'TYR-H': 'H',
'TYR-O': 'O',
'TYR-N': 'N', "RA5-H2'1": 'H1', 'TYR-C': 'C', 'VAL-O': 'O', "DG3-H1'": 'H2', "DT3-O4'": 'OS',
'LYN-HB3': 'HC',
'LYN-HB2': 'HC', "RG5-C3'": 'CT', "DT5-C1'": 'CT', 'THR-CG2': 'CT', 'DG-N9': 'N*', 'DG-N3': 'NC',
'DG-N2': 'N2',
'DG-N1': 'NA', 'DG-N7': 'NB', 'DAN-H62': 'H', 'DAN-H61': 'H', "DA-O3'": 'OS', 'TRP-N': 'N', 'TRP-O': 'O',
'TRP-H': 'H',
"DA5-C5'": 'CT', 'MET-CB': 'CT', 'TRP-C': 'C', "DT3-C4'": 'CT', "RU-O5'": 'OS', 'DA-N7': 'NB',
'DA-N6': 'N2',
'DA-N1': 'NC', 'DA-N3': 'NC', "RCN-C2'": 'CT', 'DA-N9': 'N*', 'CYM-HA': 'H1', 'CYM-HN': 'H',
'RAN-H8': 'H5',
'PRO-HB3': 'HC', 'PRO-HB2': 'HC', 'RA-N6': 'N2', 'DC3-C2': 'C', "RC5-C5'": 'CT', 'DA5-H8': 'H5',
'RA-N3': 'NC',
'DA5-H2': 'H5', 'RA-H61': 'H', "DT-H2'1": 'HC', "DT-H2'2": 'HC', "RUN-O3'": 'OH', "RC3-C2'": 'CT',
"RU5-C1'": 'CT',
"RG5-H3'": 'H1', 'TRP-CD1': 'CW', 'RU3-O2P': 'O2', 'DC-N4': 'N2', 'DC-N3': 'NC', 'DC-N1': 'N*',
'HIE-H': 'H',
'HIE-N': 'N', 'HIE-O': 'O', 'RC3-O2': 'O', 'GLY-HA3': 'H1', 'GLY-HA2': 'H1', 'RC3-H42': 'H',
"DC5-C1'": 'CT',
'DC-O1P': 'O2', "DA3-C1'": 'CT', 'RC-C2': 'C', 'RC-C6': 'CM', 'RC-C4': 'CA', 'RC-C5': 'CM',
"DGN-H2'2": 'HC',
"DGN-H2'1": 'HC', "RA3-H1'": 'H2', "RU3-O2'": 'OH', "RC-H4'": 'H1', 'RC-H41': 'H', 'RC-H42': 'H',
"RGN-C1'": 'CT',
'RA-H2': 'H5', 'RU5-C6': 'CM', 'RU5-C4': 'C', 'RU5-C5': 'CM', 'RU5-C2': 'C', 'RA-H8': 'H5',
"DCN-H3'": 'H1',
'DT3-O4': 'O', 'DT3-O2': 'O', "RC5-H2'1": 'H1', 'CYS-H': 'H', 'CYS-O': 'O', 'CYS-N': 'N', 'RCN-N3': 'NC',
'CYS-C': 'C',
'RCN-N1': 'N*', 'RCN-N4': 'N2', 'CYX-HA': 'H1', 'LYS-HA': 'H1', 'GLH-HA': 'H1', 'PHE-CD2': 'CA',
'DCN-H3T': 'HO',
'PHE-CD1': 'CA', "DG5-H2'2": 'HC', 'DT5-O4': 'O', "DG5-H2'1": 'HC', 'DT5-O2': 'O', "RG5-H2'1": 'H1',
"DGN-H4'": 'H1',
"RGN-HO'2": 'HO', "DA-H2'2": 'HC', "DA-H2'1": 'HC', 'HIP-HD2': 'H4', 'HIP-HD1': 'H', "DAN-C3'": 'CT',
'PHE-H': 'H',
'PHE-O': 'O', 'PHE-N': 'N', "RA3-O4'": 'OS', 'PHE-C': 'C', 'DT5-N3': 'NA', 'DA3-C6': 'CA',
"RG3-O4'": 'OS',
"DT3-H2'2": 'HC', "DT3-H2'1": 'HC', 'DA3-C5': 'CB', 'RG-H22': 'H', 'RG-H21': 'H', "DC-O4'": 'OS',
'LYS-HZ2': 'H',
'LYS-HZ3': 'H', "RA-C1'": 'CT', 'RU3-H5': 'HA', 'RU3-H6': 'H4', 'RU3-H3': 'H', 'DT5-H71': 'HC',
'DT5-H72': 'HC',
'DT5-H73': 'HC', 'MET-HG2': 'H1', "RG3-C4'": 'CT', "DA5-C2'": 'CT', "DT3-H5'2": 'H1', "RCN-C1'": 'CT',
'DA-O2P': 'O2',
'DG-O6': 'O', "RC5-C2'": 'CT', 'DGN-N9': 'N*', 'SER-OG': 'OH', 'DGN-N7': 'NB', 'DGN-N1': 'NA',
'DGN-N2': 'N2',
'DGN-N3': 'NC', 'DA3-H2': 'H5', 'DA3-H8': 'H5', "RU-H1'": 'H2', "DGN-O3'": 'OH', 'CYS-SG': 'SH',
'RG-H1': 'H',
'TRP-HA': 'H1', 'RG3-H3T': 'HO', 'RG-H8': 'H5', "RC3-C3'": 'CT', 'TRP-HZ2': 'HA', "RUN-H3'": 'H1',
"RU5-C2'": 'CT',
"DC5-C2'": 'CT', 'RG5-O6': 'O', 'RUN-H3T': 'HO', 'MET-HB2': 'HC', 'MET-HB3': 'HC', 'RG5-H21': 'H',
'RG5-H22': 'H',
"RG3-H3'": 'H1', 'RC3-N1': 'N*', 'RC3-N3': 'NC', 'RC3-N4': 'N2', "RC5-H1'": 'H2', 'LYN-NZ': 'N3',
"DAN-H5'1": 'H1',
'THR-HA': 'H1', 'THR-HB': 'H1', "RU3-O3'": 'OH', "RU3-H4'": 'H1', "DG-H4'": 'H1', "RCN-H2'1": 'H1',
'RA-C4': 'CB',
'TYR-CE1': 'CA', 'TYR-CE2': 'CA', "DC-H3'": 'H1', 'DT3-H3': 'H', 'DT3-H6': 'H4', "DCN-C5'": 'CT',
'DC3-O1P': 'O2',
'RCN-O2': 'O', "DCN-O5'": 'OH', 'THR-OG1': 'OH', 'DGN-H5T': 'HO', 'RGN-C5': 'CB', "RA-C3'": 'CT',
'RGN-C6': 'C',
"DC5-H5'1": 'H1', "DC5-H5'2": 'H1', 'HIP-HE1': 'H5', 'HIP-HE2': 'H', "DG-C2'": 'CT', 'HID-HA': 'H1',
"RA3-O5'": 'OS',
'TRP-CE2': 'CN', 'TRP-CE3': 'CA', "RG3-O5'": 'OS', "RG-H3'": 'H1', 'DA-O1P': 'O2', "DG5-H1'": 'H2',
"RA5-H4'": 'H1',
"DG3-H5'2": 'H1', "DG3-H5'1": 'H1', 'RG3-P': 'P', 'ILE-CB': 'CT', 'ILE-CA': 'CT', "DT5-H4'": 'H1',
"DAN-H4'": 'H1',
"RG5-C1'": 'CT', 'RA3-N3': 'NC', "RG3-C5'": 'CT', "DA5-C3'": 'CT', 'RA5-H62': 'H', 'GLH-HB3': 'HC',
'RA5-H61': 'H',
"DGN-H5'1": 'H1', "DGN-H5'2": 'H1', 'PRO-C': 'C', 'PRO-O': 'O', 'PRO-N': 'N', 'RA5-C8': 'CK',
"DC5-C3'": 'CT',
'TYR-CA': 'CT', 'TYR-CB': 'CT', 'TYR-CG': 'CA', 'DG-H1': 'H', 'RA5-C2': 'CQ', "RGN-H4'": 'H1',
"RC5-C3'": 'CT',
'DG-H8': 'H5', 'TYR-CZ': 'C', 'DGN-O6': 'O', "RU5-C3'": 'CT', 'RA5-C6': 'CA', 'RG5-N1': 'NA',
"RC3-O4'": 'OS',
'RA5-C5': 'CB', 'LEU-H': 'H', 'ASP-C': 'C', 'LEU-N': 'N', 'LEU-O': 'O', 'THR-HG22': 'HC', 'ASP-H': 'H',
'LEU-C': 'C',
'ASP-N': 'N', 'ASP-O': 'O', "RC3-C4'": 'CT', 'RA3-N9': 'N*', 'RA3-N7': 'NB', 'RA3-N6': 'N2',
"RA-HO'2": 'HO',
"RUN-H4'": 'H1', 'RA3-N1': 'NC', 'GLH-HB2': 'HC', 'ASH-H': 'H', 'ASH-N': 'N', 'ASH-O': 'O',
'RG5-N9': 'N*',
'RG3-H22': 'H', 'ASH-C': 'C', "RG5-H1'": 'H2', 'RG3-H21': 'H', "RAN-C1'": 'CT', 'RG5-N2': 'N2',
'RA5-C4': 'CB',
"DT5-H5'1": 'H1', "RA-H2'1": 'H1', 'RAN-N9': 'N*', 'RAN-N1': 'NC', 'RAN-N3': 'NC', "DA5-O5'": 'OH',
'RAN-N6': 'N2',
'RAN-N7': 'NB', "DC-O3'": 'OS', "RAN-HO'2": 'HO', "DA3-C3'": 'CT', 'DC5-C2': 'C', 'DC3-N4': 'N2',
'DC5-C6': 'CM',
'DC5-C4': 'CA', 'DC5-C5': 'CM', "RA3-C4'": 'CT', "RC-O3'": 'OS', 'CYS-HA': 'H1', 'CYS-HG': 'HS',
'HIP-CE1': 'CR',
'SER-H': 'H', 'SER-N': 'N', 'SER-O': 'O', 'SER-C': 'C', 'TYR-CD2': 'CA', 'TYR-CD1': 'CA', "DC-H4'": 'H1',
'TRP-NE1': 'NA', "DCN-C4'": 'CT', 'DC-H42': 'H', 'DC-H41': 'H', 'ASP-OD2': 'O2', 'ASP-OD1': 'O2',
"RU3-H2'1": 'H1',
'ASH-HD2': 'HO', "DCN-O4'": 'OS', 'GLN-HE21': 'H', "RG5-O4'": 'OS', 'GLN-HE22': 'H', "DG-H2'1": 'HC',
"DG-H2'2": 'HC',
'LYS-NZ': 'N3', 'ILE-C': 'C', 'PHE-HD1': 'HA', "RU5-HO'2": 'HO', 'PHE-HD2': 'HA', "RA-C2'": 'CT',
'ILE-H': 'H',
"RGN-H2'1": 'H1', 'ILE-O': 'O', 'DC5-H5T': 'HO', "RG-C3'": 'CT', "RA3-O2'": 'OH', 'TRP-HE1': 'H',
'TRP-HE3': 'HA',
'RA3-O2P': 'O2', 'RG3-C8': 'CK', 'RG3-C2': 'CA', 'RG3-C6': 'C', 'RG3-C4': 'CB', 'RG3-C5': 'CB',
"DC3-C2'": 'CT',
'DT3-H3T': 'HO', 'ARG-NH2': 'N2', 'TYR-HA': 'H1', 'RGN-H5T': 'HO', 'DAN-H5T': 'HO', "RUN-O2'": 'OH',
"RA5-H1'": 'H2',
'TYR-HH': 'HO', 'HIP-ND1': 'NA', 'DG3-H8': 'H5', 'DG3-H1': 'H', "RA5-O2'": 'OH', 'ARG-C': 'C',
'ARG-N': 'N',
'ARG-O': 'O', 'ARG-H': 'H', 'DT5-H5T': 'HO', "DA3-H2'2": 'HC', "DA3-H2'1": 'HC', 'RC-H6': 'H4',
'DA3-N1': 'NC',
'DA3-N3': 'NC', "RU5-C4'": 'CT', 'DA3-N7': 'NB', 'DA3-N6': 'N2', 'DA3-N9': 'N*', 'DC-O2P': 'O2',
'TRP-CZ3': 'CA',
'TRP-CZ2': 'CA', 'RA3-P': 'P', "RC3-O5'": 'OS', "RG-H5'1": 'H1', "RG-H5'2": 'H1', 'RG-N9': 'N*',
'DAN-C5': 'CB',
'RU-O2P': 'O2', 'RG-N1': 'NA', 'RG-N3': 'NC', 'RG-N2': 'N2', 'RG-N7': 'NB', "DTN-H5'2": 'H1',
"DTN-H5'1": 'H1',
"RC3-C5'": 'CT', 'DC3-N3': 'NC', 'HIP-H': 'H', 'DC3-N1': 'N*', 'HIP-O': 'O', 'HIP-N': 'N', 'HIP-C': 'C',
'MET-C': 'C',
'RUN-H5T': 'HO', "RU-O2'": 'OH', 'RU-H3': 'H', "DA3-O4'": 'OS', "RU5-O4'": 'OS', 'MET-H': 'H',
'DAN-C8': 'CK',
"DT-C1'": 'CT', "DA5-H3'": 'H1', "DA3-C4'": 'CT', 'HID-O': 'O', 'HID-N': 'N', 'HID-H': 'H', 'HID-C': 'C',
"DT5-C3'": 'CT', "DAN-C2'": 'CT', 'RC-O2P': 'O2', "DG5-O4'": 'OS', "RC-O2'": 'OH', 'HIP-CD2': 'CW',
"DAN-O5'": 'OH',
'RU3-O1P': 'O2', 'PHE-HA': 'H1', 'DC-C4': 'CA', 'RC-N4': 'N2', 'DC-C6': 'CM', 'RC-N1': 'N*',
'PHE-HZ': 'HA',
"DT-H5'2": 'H1', 'LYS-CG': 'CT', 'LYS-CD': 'CT', "DT-H5'1": 'H1', 'LYS-CB': 'CT', 'CYX-SG': 'S',
'LYS-CA': 'CT',
'DTN-H73': 'HC', 'DTN-H72': 'HC', 'DTN-H71': 'HC', 'DC-P': 'P', "RG5-O5'": 'OH', "RC-H2'1": 'H1',
'DG-P': 'P',
'GLH-OE2': 'OH', 'GLH-OE1': 'O', 'PHE-HE2': 'HA', 'PHE-HE1': 'HA', "DA3-H5'2": 'H1', "RA-C5'": 'CT',
'ARG-HH22': 'H',
'ARG-HH21': 'H', "RG-C2'": 'CT', 'ALA-CB': 'CT', 'ALA-CA': 'CT', "RA3-O3'": 'OH', 'TRP-HD1': 'H4',
"RA3-H2'1": 'H1',
"RG-H1'": 'H2', "DC5-H4'": 'H1', "DG5-H3'": 'H1', "RUN-H2'1": 'H1', "RA-O5'": 'OS', 'DC5-H41': 'H',
'DC5-H42': 'H',
'SER-HA': 'H1', "DT5-H5'2": 'H1', 'SER-HG': 'HO', 'DA3-H62': 'H', 'DA3-H61': 'H', "RUN-HO'2": 'HO',
"RU5-H5'1": 'H1',
"RU-H2'1": 'H1', "RU5-H5'2": 'H1', 'DG3-O2P': 'O2', "DA5-C1'": 'CT', 'HIP-NE2': 'NA', 'DT3-P': 'P',
'DG5-H1': 'H',
'DG5-H8': 'H5', "RA5-O3'": 'OS', 'LYN-C': 'C', 'LYN-O': 'O', 'LYN-N': 'N', 'LYN-H': 'H', "DA-H5'1": 'H1',
"DA-H5'2": 'H1', "RC5-C1'": 'CT', 'RG5-N7': 'NB', "RAN-H5'1": 'H1', "RAN-H5'2": 'H1', "RU5-C5'": 'CT',
"RGN-O3'": 'OH',
'CYM-CA': 'CT', 'DGN-C8': 'CK', 'DGN-C6': 'C', 'DGN-C5': 'CB', 'DGN-C4': 'CB', 'DGN-C2': 'CA',
"RU-C5'": 'CT',
'RG-O6': 'O', "DT5-O3'": 'OS', 'RA3-H8': 'H5', 'RA3-H2': 'H5', "RA5-H3'": 'H1', 'RG5-H8': 'H5',
"DT-C2'": 'CT',
'RG5-H1': 'H', 'TYR-OH': 'OH', "RAN-H4'": 'H1', 'MET-CA': 'CT', "RAN-C3'": 'CT', 'MET-CE': 'CT',
'MET-CG': 'CT',
'RC3-C6': 'CM', 'RC3-C4': 'CA', 'RC3-C5': 'CM', 'RC3-C2': 'C', 'DAN-H8': 'H5', "DA3-O5'": 'OS',
'HID-ND1': 'NA',
'DAN-H2': 'H5', "RU5-O5'": 'OH', 'DC3-H42': 'H', "DA5-H5'2": 'H1', "DA5-H5'1": 'H1', 'DC3-H41': 'H',
"DA-C4'": 'CT',
"DC3-H4'": 'H1', 'RG5-N3': 'NC', 'DTN-C7': 'CT', "DA3-C5'": 'CT', 'MET-SD': 'S', 'RC5-H5': 'HA',
'DA3-C4': 'CB',
'RC5-H6': 'H4', 'DA3-C2': 'CQ', 'DA3-C8': 'CK', 'MET-HG3': 'H1', 'HIE-NE2': 'NA', 'DC3-H5': 'HA',
"DT3-H5'1": 'H1',
"DAN-C1'": 'CT', 'DC3-H6': 'H4', 'GLN-OE1': 'O', 'ARG-HE': 'H', 'DC5-O2': 'O', 'ARG-HA': 'H1',
'LYN-CE': 'CT',
'LYN-CD': 'CT', 'LYN-CG': 'CT', 'RGN-C2': 'CA', 'LYN-CA': 'CT', 'RGN-C4': 'CB', 'LYN-CB': 'CT',
"RA5-C4'": 'CT',
'RGN-C8': 'CK', 'RC-O2': 'O', "RCN-O5'": 'OH', 'RUN-N1': 'N*', 'RUN-N3': 'NA', "RG5-O2'": 'OH',
'RC-O1P': 'O2',
'HIE-C': 'C', "RA-C4'": 'CT', 'ASN-CG': 'C', 'ARG-HH12': 'H', 'ASN-CA': 'CT', 'ASN-CB': 'CT',
'ARG-HH11': 'H',
'ARG-CD': 'CT', "RG-C1'": 'CT', 'TYR-HD1': 'HA', "DG5-O3'": 'OS', 'TYR-HD2': 'HA', 'ILE-N': 'N',
"RC3-HO'2": 'HO',
"DG3-O3'": 'OH', "DG3-H4'": 'H1', "RA-O4'": 'OS', 'ASH-OD2': 'OH', 'ASH-OD1': 'O', 'DG5-H22': 'H',
'DG5-H21': 'H',
"DT3-H4'": 'H1', "DT3-C3'": 'CT', 'DAN-H3T': 'HO', "DGN-C1'": 'CT', "RG5-C4'": 'CT', "RG-O4'": 'OS',
'DG5-O6': 'O',
"RA5-O4'": 'OS', "RCN-C5'": 'CT', "DAN-H3'": 'H1'}
bond_structure = {
'GLH': {'C': ['O'], 'OE2': ['HE2'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG2', 'HG3', 'CD'], 'CD': ['OE1', 'OE2'],
'CB': ['HB2', 'HB3', 'CG'], 'N': ['H', 'CA']},
'ILE': {'C': ['O'], 'CB': ['HB', 'CG2', 'CG1'], 'CA': ['HA', 'CB', 'C'], 'N': ['H', 'CA'],
'CD1': ['HD11', 'HD12', 'HD13'], 'CG1': ['HG12', 'HG13', 'CD1'], 'CG2': ['HG21', 'HG22', 'HG23']},
'DTN': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], 'C7': ['H71', 'H72', 'H73'],
'C6': ['H6', 'C5'], 'C5': ['C7', 'C4'], 'C4': ['O4', 'N3']},
'GLN': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG2', 'HG3', 'CD'],
'CD': ['OE1', 'NE2'], 'N': ['H', 'CA'], 'NE2': ['HE21', 'HE22']},
'DG': {'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N9', "C2'"],
"O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'], 'N2': ['H21', 'H22'], 'N7': ['C5'],
'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'DA3': {"C2'": ["H2'1", "H2'2"], 'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"],
"O3'": ['H3T'], "C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"],
'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'],
'N1': ['C2'], 'N7': ['C5'], 'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'DC': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['C2'], 'N4': ['H41', 'H42'],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['N4', 'N3']},
'DA': {"C2'": ["H2'1", "H2'2"], 'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"],
"C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'], 'N1': ['C2'], 'N7': ['C5'],
'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'GLY': {'CA': ['HA2', 'HA3', 'C'], 'C': ['O'], 'N': ['H', 'CA']},
'RCN': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['O2'], 'N1': ['C6', 'C2'], "O2'": ["HO'2"], 'N3': ['C2'],
'N4': ['H41', 'H42'],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['N4', 'N3']},
'HIP': {'C': ['O'], 'CD2': ['HD2'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['ND1', 'CD2'],
'N': ['H', 'CA'], 'CE1': ['HE1', 'NE2'], 'ND1': ['HD1', 'CE1'], 'NE2': ['HE2', 'CD2']},
'TYR': {'C': ['O'], 'CD2': ['HD2'], 'OH': ['HH'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'],
'CG': ['CD1', 'CD2'], 'N': ['H', 'CA'], 'CZ': ['OH', 'CE2'], 'CD1': ['HD1', 'CE1'], 'CE1': ['HE1', 'CZ'],
'CE2': ['HE2', 'CD2']},
'RU3': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], "O2'": ["HO'2"],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['O4', 'N3']},
'DT': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], 'C7': ['H71', 'H72', 'H73'],
'C6': ['H6', 'C5'], 'C5': ['C7', 'C4'], 'C4': ['O4', 'N3']},
'ALA': {'CB': ['HB1', 'HB2', 'HB3'], 'CA': ['HA', 'CB', 'C'], 'C': ['O'], 'N': ['H', 'CA']},
'GLU': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG2', 'HG3', 'CD'],
'CD': ['OE1', 'OE2'], 'N': ['H', 'CA']},
'RGN': {"C5'": ["H5'1", "H5'2", "C4'"], 'N3': ['C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], 'N9': ['C8', 'C4'],
"C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'], 'N2': ['H21', 'H22'], 'N7': ['C5'],
"O2'": ["HO'2"], 'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'RU5': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"],
"O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"], "C2'": ["H2'1", "O2'"], 'C2': ['O2'],
'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], "O2'": ["HO'2"], 'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'],
'C4': ['O4', 'N3']},
'DCN': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['C2'], 'N4': ['H41', 'H42'],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['N4', 'N3']},
'RU': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], "O2'": ["HO'2"],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['O4', 'N3']},
'ASP': {'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['OD1', 'OD2'], 'C': ['O'], 'N': ['H', 'CA']},
'SER': {'OG': ['HG'], 'CB': ['HB2', 'HB3', 'OG'], 'CA': ['HA', 'CB', 'C'], 'C': ['O'], 'N': ['H', 'CA']},
'LYS': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG2', 'HG3', 'CD'],
'CE': ['HE2', 'HE3', 'NZ'], 'CD': ['HD2', 'HD3', 'CE'], 'NZ': ['HZ1', 'HZ2', 'HZ3'], 'N': ['H', 'CA']},
'RAN': {"C5'": ["H5'1", "H5'2", "C4'"], "C2'": ["H2'1", "O2'"], 'N3': ['C4'], "O5'": ["C5'"],
"C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'], "C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"],
'N9': ['C8', 'C4'], "C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'], 'N1': ['C2'],
'N7': ['C5'], "O2'": ["HO'2"], 'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'DAN': {"C5'": ["H5'1", "H5'2", "C4'"], "C2'": ["H2'1", "H2'2"], 'N3': ['C4'], "O5'": ["C5'"],
"C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'], "C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"],
'N9': ['C8', 'C4'], "C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'], 'N1': ['C2'],
'N7': ['C5'], 'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'CYX': {'CB': ['HB2', 'HB3', 'SG'], 'CA': ['HA', 'CB', 'C'], 'C': ['O'], 'N': ['H', 'CA']},
'DGN': {"C5'": ["H5'1", "H5'2", "C4'"], 'N3': ['C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], 'N9': ['C8', 'C4'],
"C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'], 'N2': ['H21', 'H22'], 'N7': ['C5'],
'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'RG': {'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N9', "C2'"],
"O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'], 'N2': ['H21', 'H22'], 'N7': ['C5'],
"O2'": ["HO'2"], 'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'HID': {'C': ['O'], 'CD2': ['HD2'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['ND1', 'CD2'],
'N': ['H', 'CA'], 'CE1': ['HE1', 'NE2'], 'ND1': ['HD1', 'CE1'], 'NE2': ['CD2']},
'RA': {"C2'": ["H2'1", "O2'"], 'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"],
"C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'], 'N1': ['C2'], 'N7': ['C5'],
"O2'": ["HO'2"], 'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'RC': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['O2'], 'N1': ['C6', 'C2'], "O2'": ["HO'2"], 'N3': ['C2'],
'N4': ['H41', 'H42'],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['N4', 'N3']},
'LYN': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG2', 'HG3', 'CD'],
'CE': ['HE2', 'HE3', 'NZ'], 'CD': ['HD2', 'HD3', 'CE'], 'NZ': ['HZ2', 'HZ3'], 'N': ['H', 'CA']},
'ASH': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['OD1', 'OD2'], 'N': ['H', 'CA'],
'OD2': ['HD2']},
'ASN': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['OD1', 'ND2'], 'N': ['H', 'CA'],
'ND2': ['HD21', 'HD22']},
'CYM': {'CB': ['HB3', 'HB2', 'SG'], 'CA': ['HA', 'CB', 'C'], 'C': ['O'], 'N': ['HN', 'CA']},
'HIE': {'C': ['O'], 'CD2': ['HD2'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['ND1', 'CD2'],
'N': ['H', 'CA'], 'CE1': ['HE1', 'NE2'], 'ND1': ['CE1'], 'NE2': ['HE2', 'CD2']},
'CYS': {'CB': ['HB2', 'HB3', 'SG'], 'CA': ['HA', 'CB', 'C'], 'SG': ['HG'], 'C': ['O'], 'N': ['H', 'CA']},
'VAL': {'C': ['O'], 'CB': ['HB', 'CG1', 'CG2'], 'CA': ['HA', 'CB', 'C'], 'N': ['H', 'CA'],
'CG1': ['HG11', 'HG12', 'HG13'], 'CG2': ['HG21', 'HG22', 'HG23']},
'THR': {'C': ['O'], 'CB': ['HB', 'CG2', 'OG1'], 'CA': ['HA', 'CB', 'C'], 'OG1': ['HG1'], 'N': ['H', 'CA'],
'CG2': ['HG21', 'HG22', 'HG23']},
'DG3': {'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"],
"C4'": ["H4'", "O4'", "C3'"], "C2'": ["H2'1", "H2'2"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'],
'N2': ['H21', 'H22'], 'N7': ['C5'], 'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'RA5': {"C5'": ["H5'1", "H5'2", "C4'"], "C2'": ["H2'1", "O2'"], 'N3': ['C4'], "O5'": ["C5'"],
"C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"],
'N9': ['C8', 'C4'],
"C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'], 'N1': ['C2'], 'N7': ['C5'],
"O2'": ["HO'2"], 'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'RA3': {"C2'": ["H2'1", "O2'"], 'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"],
"O3'": ['H3T'], "C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"],
'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'],
'N1': ['C2'], 'N7': ['C5'], "O2'": ["HO'2"], 'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'DG5': {"C5'": ["H5'1", "H5'2", "C4'"], 'N3': ['C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], 'N9': ['C8', 'C4'],
"C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'], 'N2': ['H21', 'H22'], 'N7': ['C5'],
'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'TRP': {'C': ['O'], 'CZ2': ['HZ2', 'CH2'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'],
'CG': ['CD1', 'CD2'],
'CH2': ['HH2', 'CZ3'], 'N': ['H', 'CA'], 'CE2': ['CZ2', 'CD2'], 'CE3': ['HE3', 'CD2'],
'CD1': ['HD1', 'NE1'],
'CZ3': ['HZ3', 'CE3'], 'NE1': ['HE1', 'CE2']},
'DC5': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"],
"O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"], "C2'": ["H2'1", "H2'2"], 'C2': ['O2'],
'N1': ['C6', 'C2'], 'N3': ['C2'], 'N4': ['H41', 'H42'], 'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'],
'C4': ['N4', 'N3']},
'DC3': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['C2'], 'N4': ['H41', 'H42'],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['N4', 'N3']},
'RG3': {'N3': ['C4'], 'N9': ['C8', 'C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], "C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"],
"C4'": ["H4'", "O4'", "C3'"], "C2'": ["H2'1", "O2'"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'],
'N2': ['H21', 'H22'], 'N7': ['C5'], "O2'": ["HO'2"], 'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'],
'C8': ['H8', 'N7']},
'RUN': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'],
"C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], "O2'": ["HO'2"],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['O4', 'N3']},
'RG5': {"C5'": ["H5'1", "H5'2", "C4'"], 'N3': ['C4'], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"],
"C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"], 'N9': ['C8', 'C4'],
"C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['N2', 'N3'], 'N1': ['H1', 'C2'], 'N2': ['H21', 'H22'], 'N7': ['C5'],
"O2'": ["HO'2"], 'C6': ['O6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'DA5': {"C5'": ["H5'1", "H5'2", "C4'"], "C2'": ["H2'1", "H2'2"], 'N3': ['C4'], "O5'": ["C5'"],
"C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N9', "C2'"], "O4'": ["C1'"], 'H5T': ["O5'"],
'N9': ['C8', 'C4'],
"C4'": ["H4'", "O4'", "C3'"], 'N6': ['H61', 'H62'], 'C2': ['H2', 'N3'], 'N1': ['C2'], 'N7': ['C5'],
'C6': ['N6', 'N1'], 'C5': ['C6', 'C4'], 'C8': ['H8', 'N7']},
'RC5': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"],
"O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"], "C2'": ["H2'1", "O2'"], 'C2': ['O2'],
'N1': ['C6', 'C2'], "O2'": ["HO'2"], 'N3': ['C2'], 'N4': ['H41', 'H42'], 'C6': ['H6', 'C5'],
'C5': ['H5', 'C4'], 'C4': ['N4', 'N3']},
'PHE': {'C': ['O'], 'CD2': ['HD2'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['CD1', 'CD2'],
'N': ['H', 'CA'], 'CZ': ['HZ', 'CE2'], 'CD1': ['HD1', 'CE1'], 'CE1': ['HE1', 'CZ'], 'CE2': ['HE2', 'CD2']},
'RC3': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "O2'"], 'C2': ['O2'], 'N1': ['C6', 'C2'], "O2'": ["HO'2"], 'N3': ['C2'],
'N4': ['H41', 'H42'],
'C6': ['H6', 'C5'], 'C5': ['H5', 'C4'], 'C4': ['N4', 'N3']},
'MET': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG2', 'HG3', 'SD'],
'CE': ['HE1', 'HE2', 'HE3'], 'N': ['H', 'CA'], 'SD': ['CE']},
'LEU': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG', 'CD1', 'CD2'],
'N': ['H', 'CA'],
'CD1': ['HD11', 'HD12', 'HD13'], 'CD2': ['HD21', 'HD22', 'HD23']},
'ARG': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CG'], 'CA': ['HA', 'CB', 'C'], 'CG': ['HG2', 'HG3', 'CD'],
'NE': ['HE', 'CZ'], 'CD': ['HD2', 'HD3', 'NE'], 'CZ': ['NH1', 'NH2'], 'NH1': ['HH11', 'HH12'],
'NH2': ['HH21', 'HH22'], 'N': ['H', 'CA']},
'DT3': {"O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "O3'": ['H3T'], "C1'": ["H1'", 'N1', "C2'"], "O4'": ["C1'"],
"C5'": ["H5'1", "H5'2", "C4'"], 'P': ['O1P', 'O2P', "O5'"], "C4'": ["H4'", "O4'", "C3'"],
"C2'": ["H2'1", "H2'2"], 'C2': ['O2'], 'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], 'C7': ['H71', 'H72', 'H73'],
'C6': ['H6', 'C5'], 'C5': ['C7', 'C4'], 'C4': ['O4', 'N3']},
'PRO': {'C': ['O'], 'CB': ['HB2', 'HB3', 'CA'], 'CA': ['HA', 'C'], 'CG': ['HG2', 'HG3', 'CB'],
'CD': ['HD2', 'HD3', 'CG'], 'N': ['CD', 'CA']},
'DT5': {"C5'": ["H5'1", "H5'2", "C4'"], "O5'": ["C5'"], "C3'": ["H3'", "C2'", "O3'"], "C1'": ["H1'", 'N1', "C2'"],
"O4'": ["C1'"], 'H5T': ["O5'"], "C4'": ["H4'", "O4'", "C3'"], "C2'": ["H2'1", "H2'2"], 'C2': ['O2'],
'N1': ['C6', 'C2'], 'N3': ['H3', 'C2'], 'C7': ['H71', 'H72', 'H73'], 'C6': ['H6', 'C5'], 'C5': ['C7', 'C4'],
'C4': ['O4', 'N3']}}
masses = {'IP': 22.99, 'Rb': 85.47, 'HS': 1.008, 'HP': 1.008, 'HW': 1.008, 'Li': 6.94, 'HO': 1.008, 'BR': 79.9,
'HC': 1.008,
'HA': 1.008, 'N2': 14.01, 'C*': 12.01, 'N3': 14.01, 'LC': 12.01, 'NO': 14.01, 'NA': 14.01, 'NB': 14.01,
'NC': 14.01,
'O2': 16.0, 'I': 126.9, 'H': 1.008, 'NP': 14.01, 'C0': 40.08, 'N*': 14.01, 'K': 39.1, 'CK': 12.01,
'Cs': 132.91,
'C': 12.01, 'CN': 12.01, 'CM': 12.01, 'F': 19.0, 'CC': 12.01, 'CB': 12.01, 'CA': 12.01, 'O': 16.0, 'N': 14.01,
'P': 30.97, 'S': 32.06, 'CX': 12.0, 'IM': 35.45, 'CR': 12.01, 'CQ': 12.01, 'IB': 131.0, 'CW': 12.01,
'CV': 12.01,
'CU': 63.55, 'CT': 12.01, 'MG': 24.305, 'OH': 16.0, 'H2': 1.008, 'H3': 1.008, 'H1': 1.008, 'CY': 12.0,
'H4': 1.008,
'H5': 1.008, 'CD': 12.0, 'SH': 32.06, 'LO': 16.0, 'OW': 16.0, 'OS': 16.0, 'FE': 55.0}
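# Usage sketch (added for illustration; not part of the original data module).
# The tables above share a consistent keying scheme, so a per-atom force-field
# lookup chains 'RES-ATOM' key -> AMBER atom type -> per-type mass, with
# bonding topology keyed by residue and atom name. All keys below appear in
# the tables in this file:
#
#     atom_type = amber_types['LYS-CA']        # -> 'CT'
#     atom_mass = masses[atom_type]            # -> 12.01
#     bonded_to = bond_structure['LYS']['CA']  # -> ['HA', 'CB', 'C']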
|
brianjimenez/lightdock
|
lightdock/scoring/sd/data/amber.py
|
Python
|
gpl-3.0
| 125,069
|
[
"Amber"
] |
0d248d72b4f52ce2b07503e25b6aea1f5a3aaf8a4430ef68a9e8efe78b59a130
|
import os
import numpy as np
import sklearn.metrics as metrics
import sklearn.decomposition as decomp
import pdb
import matplotlib.pyplot as plt
from . import sc3_clustering_impl as sc
def load_dataset_tsv(fname, fgenes=None, flabels=None):
# check data filename
if not os.path.exists(fname):
raise Exception('File \'{0}\' not found.'.format(fname))
#print('Loading TSV data file from {0}.'.format(fname))
data = np.loadtxt(fname, delimiter='\t')
#print data.shape
gene_ids = np.arange(0, data.shape[0]).astype(str)
# Some scripts expect the gene ids (esp. for multitask learning of two or
# more datasets). If not specified, inform the user.
if fgenes is None:
print('Warning! Gene identifier file is not specified. Gene ids are now generated.')
else:
gene_ids = np.loadtxt(fgenes, delimiter='\t', dtype=str)
#print('Gene ids loaded for {0} genes.'.format(gene_ids.shape[0]))
if not np.unique(gene_ids).shape[0] == gene_ids.shape[0]:
print(('Warning! Gene ids are supposed to be unique. '
'Only {0} of {1} entries are unique.'.format(np.unique(gene_ids).shape[0], gene_ids.shape[0])))
labels = None
labels_2_ids = None
if flabels is not None:
#print('Loading labels from \'{0}\'.'.format(flabels))
# labels are handled as string values even though they are numerical
label_ids = np.loadtxt(flabels, delimiter='\t', dtype=str)
assert label_ids.size == data.shape[1]
labels_2_ids = np.unique(label_ids)
unique_ind = np.arange(start=0, stop=labels_2_ids.shape[0])
labels = np.zeros((data.shape[1]), dtype=int)
#print('Found {0} unique labels:'.format(labels_2_ids.size))
#print labels_2_ids
for i in range(unique_ind.size):
inds = np.where(label_ids == labels_2_ids[i])[0]
labels[inds] = unique_ind[i]
#print('Label {0} occurred {1} times. Assigned class is {2}.'.format(labels_2_ids[i], inds.size, unique_ind[i]))
return data, gene_ids, labels, labels_2_ids
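# Hypothetical usage (file names below are placeholders, not from this repo):
#
#     data, gene_ids, labels, labels_2_ids = load_dataset_tsv(
#         'counts.tsv', fgenes='gene_ids.tsv', flabels='labels.tsv')
#
# data is a genes x cells matrix, labels are integer-encoded per cell, and
# labels_2_ids maps each integer class back to the original label string.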
def load_dataset(fname):
if not os.path.exists(fname):
raise Exception('File \'{0}\' not found.'.format(fname))
foo = np.load(fname)
data = foo['data']
gene_ids = foo['transcripts']
# look for labels
labels = None
if 'labels' in foo:
labels = foo['labels']
return data, gene_ids, labels
def normalize_kernel(K):
# A kernel K is normalized, iff K_ii = 1 \forall i
N = K.shape[0]
a = np.sqrt(np.diag(K)).reshape((N, 1))
if any(np.isnan(a)) or any(np.isinf(a)) or any(np.abs(a) <= 1e-16):
print('Numerical instabilities.')
C = np.eye(N)
else:
b = 1. / a
C = b.dot(b.T)
return K * C
def center_kernel(K):
# Mean free in feature space
N = K.shape[0]
a = np.ones((N, N)) / float(N)
return K - a.dot(K) - K.dot(a) + a.dot(K.dot(a))
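# Illustrative check of the two preprocessing steps (toy data, linear kernel):
# normalize_kernel scales K so that K_ii = K_ii / sqrt(K_ii * K_ii) = 1 on the
# diagonal, and center_kernel makes the implicit feature vectors mean-free, so
# every row/column of the centered kernel sums to (numerically) zero.
#
#     X = np.random.randn(5, 20)   # 5 dims x 20 examples
#     K = X.T.dot(X)               # 20 x 20 linear kernel
#     Kn = normalize_kernel(K)     # np.allclose(np.diag(Kn), 1.0) -> True
#     Kc = center_kernel(K)        # np.allclose(Kc.sum(axis=0), 0.0) -> True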
def kta_align_general(K1, K2):
"""
Computes the (empirical) alignment of two kernels K1 and K2
Definition 1: (Empirical) Alignment
a = <K1, K2>_Frob
b = sqrt( <K1, K1> <K2, K2>)
kta = a / b
with <A, B>_Frob = sum_ij A_ij B_ij = tr(AB')
"""
return K1.dot(K2.T).trace() / np.sqrt(K1.dot(K1.T).trace() * K2.dot(K2.T).trace())
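# Sanity check (illustrative): the alignment is a cosine under the Frobenius
# inner product, so a kernel is perfectly aligned with itself and the score is
# invariant to positive rescaling:
#
#     K = np.random.randn(10, 10)
#     K = K.dot(K.T)                    # symmetric PSD Gram matrix
#     kta_align_general(K, K)           # -> 1.0
#     kta_align_general(K, 3.0 * K)     # -> 1.0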
def kta_align_binary(K, y):
# Computes the (empirical) alignment of kernel K1 and
# a corresponding binary label vector y \in \{+1, -1\}^m
m = int(y.size)
YY = y.reshape((m, 1)).dot(y.reshape((1, m)))
return K.dot(YY).trace() / (m * np.sqrt(K.dot(K.T).trace()))
def get_kernel(X, Y, type='linear', param=1.0):
"""Calculates a kernel given the data X and Y (dims x exms)"""
(Xdims, Xn) = X.shape
(Ydims, Yn) = Y.shape
kernel = 1.0
if type=='linear':
#print('Calculating linear kernel with size {0}x{1}.'.format(Xn, Yn))
kernel = X.T.dot(Y)
if type=='rbf':
#print('Calculating Gaussian kernel with size {0}x{1} and sigma2={2}.'.format(Xn, Yn, param))
Dx = (np.ones((Yn, 1)) * np.diag(X.T.dot(X)).reshape(1, Xn)).T
Dy = (np.ones((Xn, 1)) * np.diag(Y.T.dot(Y)).reshape(1, Yn))
kernel = Dx - 2. * np.array(X.T.dot(Y)) + Dy
kernel = np.exp(-kernel / param)
#print kernel.shape
return kernel
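# Illustrative call (data layout is dims x examples, as in the docstring).
# For type='rbf' the entries are exp(-||x_i - y_j||^2 / param), so the
# diagonal of a kernel of X with itself is all ones:
#
#     X = np.random.randn(5, 30)
#     K = get_kernel(X, X, type='rbf', param=2.0)   # 30 x 30
#     # np.allclose(np.diag(K), 1.0) -> True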
def unsupervised_acc_silhouette(X, labels, metric='euclidean'):
dists = sc.distances(X, gene_ids=np.arange(X.shape[1]), metric=metric)
num_lbls = np.unique(labels).size
if num_lbls > 1 and not np.any(np.isnan(dists)) and not np.any(np.isinf(dists)):
return metrics.silhouette_score(dists, labels, metric='precomputed')
return 0.0
def unsupervised_acc_kta(X, labels, kernel='linear', param=1.0, center=True, normalize=True):
Ky = np.zeros((labels.size, np.max(labels) + 1))
for i in range(len(labels)):
Ky[i, labels[i]] = 1.
if kernel == 'rbf':
Kx = get_kernel(X, X, type='rbf', param=param)
Ky = get_kernel(Ky.T, Ky.T, type='linear', param=param)
else:
Kx = X.T.dot(X)
Ky = Ky.dot(Ky.T)
if center:
Kx = center_kernel(Kx)
Ky = center_kernel(Ky)
if normalize:
Kx = normalize_kernel(Kx)
Ky = normalize_kernel(Ky)
return kta_align_general(Kx, Ky)
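# Illustrative call: the labels are expanded into one-hot columns of Ky, so
# the returned value measures how well the data kernel agrees with the ideal
# label kernel (higher is better):
#
#     X = np.random.randn(5, 30)
#     labels = np.random.randint(0, 3, size=30)
#     score = unsupervised_acc_kta(X, labels, kernel='linear')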
def get_transferability_score(W, H, trg_data, reps=100, alpha=0.0, l1=0.75, max_iter=100, rel_err=1e-3):
# estimate maximum error without any transfer
errs = np.zeros((reps,))
for i in range(errs.size):
rand_gene_inds = np.random.permutation(W.shape[0])
_, _, _, errs[i] = get_transferred_data_matrix(W[rand_gene_inds, :], trg_data, max_iter=max_iter, rel_err=rel_err)
#print 'Calculating non-permuted error score'
_, _, _, err_nonpermuted = get_transferred_data_matrix(W, trg_data, max_iter=max_iter, rel_err=rel_err) # minimum transfer error
nmf = decomp.NMF(alpha=alpha, init='nndsvdar', l1_ratio=l1, max_iter=max_iter,
n_components=W.shape[1], random_state=0, shuffle=True, solver='cd', tol=0.00001, verbose=0)
W_best = nmf.fit_transform(trg_data)
H_best = nmf.components_
err_best = np.sum(np.abs(trg_data - W_best.dot(H_best))) / float(trg_data.size) # absolute
err_curr = np.sum(np.abs(trg_data - W.dot(H))) / float(trg_data.size) # absolute
err_worst = np.max(errs)
errs[errs < err_best] = err_best
percs = 1.0 - (errs - err_best) / (err_worst - err_best)
score = 1.0 - np.max([err_curr - err_best, 0]) / (err_worst - err_best)
p_value = np.sum(errs < err_nonpermuted) / float(reps)
# plt.hist(errs)
# plt.title("Histogram of random error scores")
# plt.axvline(err_best, color='k', linestyle='dashed', linewidth=1)
# plt.show()
return score, percs, p_value
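# Reading the score (numbers below are made up): random row permutations of W
# give a worst-case error, a fresh NMF fit on trg_data gives a best-case
# error, and the transferred factors are placed between the two. E.g. with
# err_best = 0.2, err_worst = 1.0, err_curr = 0.4:
#     score = 1 - (0.4 - 0.2) / (1.0 - 0.2) = 0.75
# i.e. the transfer recovers 75% of the achievable error reduction.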
def get_transferred_data_matrix(W, trg_data, normalize_H2=False, max_iter=100, rel_err=1e-3):
# initialize H: random non-negative coefficient matrix (W is kept fixed)
H = np.random.randn(W.shape[1], trg_data.shape[1])
a1, a2 = np.where(H < 0.)
H[a1, a2] *= -1.
a1, a2 = np.where(H < 1e-10)
H[a1, a2] = 1e-10
n_iter = 0
err = 1e10
while n_iter < max_iter:
n_iter += 1
if np.any(W.T.dot(W.dot(H))==0.):
raise Exception('DA target: division by zero.')
H *= W.T.dot(trg_data) / W.T.dot(W.dot(H))
new_err = np.sum(np.abs(trg_data - W.dot(H))) / float(trg_data.size) # absolute
# new_err = np.sqrt(np.sum((Xtrg - W.dot(H))*(Xtrg - W.dot(H)))) / np.float(Xtrg.size) # frobenius
if np.abs((err - new_err) / err) <= rel_err and err >= new_err:
break
err = new_err
# print ' Number of iterations for reconstruction + reconstruction error : ', n_iter, new_err
H2 = np.zeros((W.shape[1], trg_data.shape[1]))
H2[(np.argmax(H, axis=0), np.arange(trg_data.shape[1]))] = 1
# H2[(np.argmax(H, axis=0), np.arange(trg_data.shape[1]))] = np.sum(H, axis=0) # DOES NOT WORK WELL!
# normalization
if normalize_H2:
#print 'Normalize H2.'
n_iter = 0
err = 1e10
sparse_rec_err = np.sum(np.abs(trg_data - W.dot(H2))) / float(trg_data.size) # absolute
#print n_iter, ': sparse rec error: ', sparse_rec_err
while n_iter < max_iter:
n_iter += 1
H2 *= W.T.dot(trg_data) / W.T.dot(W.dot(H2))
# foo = 0.05 * W.T.dot(trg_data - W.dot(H2))
# H2[np.argmax(H, axis=0), :] -= foo[np.argmax(H, axis=0), :]
sparse_rec_err = np.sum(np.abs(trg_data - W.dot(H2))) / float(trg_data.size) # absolute
#print n_iter, ': sparse rec error: ', sparse_rec_err
if np.abs((err - sparse_rec_err) / err) <= rel_err and err >= sparse_rec_err:
break
err = sparse_rec_err
return W, H, H2, new_err
def get_matching_gene_inds(src_gene_ids, trg_gene_ids):
if not np.unique(src_gene_ids).size == src_gene_ids.size:
# raise Exception('(MTL) Gene ids are supposed to be unique.')
print(('\nWarning! (MTL gene ids) Gene ids are supposed to be unique. '
'Only {0} of {1} entries are unique.'.format(np.unique(src_gene_ids).shape[0], src_gene_ids.shape[0])))
print('Only first occurrence will be used.\n')
if not np.unique(trg_gene_ids).size == trg_gene_ids.size:
# raise Exception('(Target) Gene ids are supposed to be unique.')
print(('\nWarning! (Target gene ids) Gene ids are supposed to be unique. '
'Only {0} of {1} entries are unique.'.format(np.unique(trg_gene_ids).shape[0], trg_gene_ids.shape[0])))
print('Only first occurrence will be used.\n')
# common_ids = np.intersect1d(trg_gene_ids, src_gene_ids)
# sort the common ids according to target gene ids
common_ids = []
for i in range(trg_gene_ids.size):
if np.any(trg_gene_ids[i] == src_gene_ids):
common_ids.append(trg_gene_ids[i])
# common_ids = np.array(common_ids, dtype=np.str)
common_ids = np.array(common_ids)
#print('Both datasets have (after processing) {0} (src={1}%,trg={2}%) gene ids in common.'.format(
# common_ids.shape[0],
# np.int(np.float(common_ids.size) / np.float(src_gene_ids.size)*100.0),
# np.int(np.float(common_ids.size) / np.float(trg_gene_ids.size)*100.0)))
#print('Number of common genes must not be 0!')
assert(common_ids.shape[0] > 0)
# find indices of common_ids in pgene_ids and gene_ids
inds1 = np.zeros(common_ids.shape[0], dtype=int)
inds2 = np.zeros(common_ids.shape[0], dtype=int)
for i in range(common_ids.shape[0]):
# 1: inds1[i] = np.where(common_ids[i] == trg_gene_ids)[0][0]
inds = np.where(common_ids[i] == trg_gene_ids)[0]
if inds.size > 1:
inds1[i] = inds[0]
else:
inds1[i] = inds
# 2: inds2[i] = np.where(common_ids[i] == src_gene_ids)[0][0]
inds = np.where(common_ids[i] == src_gene_ids)[0]
if inds.size > 1:
inds2[i] = inds[0]
else:
inds2[i] = inds
return inds1, inds2
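# Hypothetical usage (gene ids below are placeholders): align two expression
# matrices on their shared genes, ordered as in the target dataset.
#
#     src_ids = np.array(['g1', 'g2', 'g3'])
#     trg_ids = np.array(['g3', 'g1', 'g4'])
#     inds1, inds2 = get_matching_gene_inds(src_ids, trg_ids)
#     # trg_ids[inds1] -> ['g3', 'g1'] and src_ids[inds2] -> ['g3', 'g1']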
|
nicococo/scRNA
|
scRNA/utils.py
|
Python
|
mit
| 11,107
|
[
"Gaussian"
] |
3acc461e9d416b73d08ac82c704d04da6898f226de870e72c18c84a09e661094
|
"""
Tests for geography support in PostGIS
"""
from __future__ import unicode_literals
import os
from unittest import skipUnless
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.measure import D
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from ..utils import oracle, postgis
from .models import City, County, Zipcode
@skipUnlessDBFeature("gis_enabled")
class GeographyTest(TestCase):
fixtures = ['initial']
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
@skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
# `GeoQuerySet.distance` is not limited to geometry fields.
htown = City.objects.get(name='Houston')
Zipcode.objects.distance(htown.point)
@skipUnless(postgis, "This is a PostGIS-specific test")
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name': 'Name',
'state': 'State',
'mpoly': 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
@skipUnlessDBFeature("has_area_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.95415646 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
@skipUnlessDBFeature("gis_enabled")
class GeographyFunctionTests(TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_function(self):
"""
Testing Distance() support on non-point geography fields.
"""
ref_dists = [0, 4891.20, 8071.64, 9123.95]
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.annotate(distance=Distance('poly', htown.point))
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance.m, ref, 2)
@skipUnlessDBFeature("has_Area_function", "supports_distance_geodetic")
def test_geography_area(self):
"""
Testing that Area calculations work on geography columns.
"""
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.95415646 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
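# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the dwithin lookup
# exercised in test02, written the way application code would use it. Only
# the models and `D` imported above are assumed; the default zip code is the
# one shipped in the fixtures.
def _example_cities_near_zipcode(code='77002', radius_mi=500):
    """Return city names within `radius_mi` miles of a zipcode polygon."""
    z = Zipcode.objects.get(code=code)
    return list(City.objects
                .filter(point__dwithin=(z.poly, D(mi=radius_mi)))
                .order_by('name')
                .values_list('name', flat=True))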
|
DONIKAN/django
|
tests/gis_tests/geogapp/tests.py
|
Python
|
bsd-3-clause
| 5,944
|
[
"VisIt"
] |
a6b01a7c3c07d0a850236a5020755003cda4a29ee7809e92ec74698d74105377
|
"""Monte Carlo Tree Search, as described in Silver et al 2015.
This is a "pure" implementation of the AlphaGo MCTS algorithm in that it is not specific to the
game of Go; everything in this file is implemented generically with respect to some state, actions,
policy function, and value function.
"""
import numpy as np
from operator import itemgetter
class TreeNode(object):
"""A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and
its visit-count-adjusted prior score u.
"""
def __init__(self, parent, prior_p):
self._parent = parent
self._children = {} # a map from action to TreeNode
self._n_visits = 0
self._Q = 0
# This value for u will be overwritten in the first call to update(), but is useful for
# choosing the first action from this node.
self._u = prior_p
self._P = prior_p
def expand(self, action_priors):
"""Expand tree by creating new children.
Arguments:
action_priors -- output from policy function - a list of tuples of actions and their prior
probability according to the policy function.
Returns:
None
"""
for action, prob in action_priors:
if action not in self._children:
self._children[action] = TreeNode(self, prob)
def select(self):
"""Select action among children that gives maximum action value, Q plus bonus u(P).
Returns:
A tuple of (action, next_node)
"""
        return max(self._children.items(), key=lambda act_node: act_node[1].get_value())
def update(self, leaf_value, c_puct):
"""Update node values from leaf evaluation.
Arguments:
leaf_value -- the value of subtree evaluation from the current player's perspective.
c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and
prior probability, P, on this node's score.
Returns:
None
"""
# Count visit.
self._n_visits += 1
# Update Q, a running average of values for all visits.
self._Q += (leaf_value - self._Q) / self._n_visits
# Update u, the prior weighted by an exploration hyperparameter c_puct and the number of
# visits. Note that u is not normalized to be a distribution.
if not self.is_root():
self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)
def update_recursive(self, leaf_value, c_puct):
"""Like a call to update(), but applied recursively for all ancestors.
Note: it is important that this happens from the root downward so that 'parent' visit
counts are correct.
"""
# If it is not root, this node's parent should be updated first.
if self._parent:
self._parent.update_recursive(leaf_value, c_puct)
self.update(leaf_value, c_puct)
def get_value(self):
"""Calculate and return the value for this node: a combination of leaf evaluations, Q, and
this node's prior adjusted for its visit count, u
"""
return self._Q + self._u
def is_leaf(self):
"""Check if leaf node (i.e. no nodes below this have been expanded).
"""
return self._children == {}
def is_root(self):
return self._parent is None
class MCTS(object):
"""A simple (and slow) single-threaded implementation of Monte Carlo Tree Search.
Search works by exploring moves randomly according to the given policy up to a certain
depth, which is relatively small given the search space. "Leaves" at this depth are assigned a
value comprising a weighted combination of (1) the value function evaluated at that leaf, and
(2) the result of finishing the game from that leaf according to the 'rollout' policy. The
probability of revisiting a node changes over the course of the many playouts according to its
estimated value. Ultimately the most visited node is returned as the next action, not the most
valued node.
The term "playout" refers to a single search from the root, whereas "rollout" refers to the
fast evaluation from leaf nodes to the end of the game.
"""
def __init__(self, value_fn, policy_fn, rollout_policy_fn, lmbda=0.5, c_puct=5,
rollout_limit=500, playout_depth=20, n_playout=10000):
"""Arguments:
        value_fn -- a function that takes in a state and outputs a score in [-1, 1], i.e. the
expected value of the end game score from the current player's perspective.
policy_fn -- a function that takes in a state and outputs a list of (action, probability)
tuples for the current player.
rollout_policy_fn -- a coarse, fast version of policy_fn used in the rollout phase.
lmbda -- controls the relative weight of the value network and fast rollout policy result
in determining the value of a leaf node. lmbda must be in [0, 1], where 0 means use only
the value network and 1 means use only the result from the rollout.
c_puct -- a number in (0, inf) that controls how quickly exploration converges to the
maximum-value policy, where a higher value means relying on the prior more, and
should be used only in conjunction with a large value for n_playout.
"""
self._root = TreeNode(None, 1.0)
self._value = value_fn
self._policy = policy_fn
self._rollout = rollout_policy_fn
self._lmbda = lmbda
self._c_puct = c_puct
self._rollout_limit = rollout_limit
self._L = playout_depth
self._n_playout = n_playout
def _playout(self, state, leaf_depth):
"""Run a single playout from the root to the given depth, getting a value at the leaf and
propagating it back through its parents. State is modified in-place, so a copy must be
provided.
Arguments:
state -- a copy of the state.
leaf_depth -- after this many moves, leaves are evaluated.
Returns:
None
"""
node = self._root
for i in range(leaf_depth):
# Only expand node if it has not already been done. Existing nodes already know their
# prior.
if node.is_leaf():
action_probs = self._policy(state)
# Check for end of game.
if len(action_probs) == 0:
break
node.expand(action_probs)
# Greedily select next move.
action, node = node.select()
state.do_move(action)
# Evaluate the leaf using a weighted combination of the value network, v, and the game's
# winner, z, according to the rollout policy. If lmbda is equal to 0 or 1, only one of
# these contributes and the other may be skipped. Both v and z are from the perspective
# of the current player (+1 is good, -1 is bad).
v = self._value(state) if self._lmbda < 1 else 0
z = self._evaluate_rollout(state, self._rollout_limit) if self._lmbda > 0 else 0
leaf_value = (1 - self._lmbda) * v + self._lmbda * z
# Update value and visit count of nodes in this traversal.
node.update_recursive(leaf_value, self._c_puct)
def _evaluate_rollout(self, state, limit):
"""Use the rollout policy to play until the end of the game, returning +1 if the current
player wins, -1 if the opponent wins, and 0 if it is a tie.
"""
player = state.get_current_player()
for i in range(limit):
action_probs = self._rollout(state)
if len(action_probs) == 0:
break
max_action = max(action_probs, key=itemgetter(1))[0]
state.do_move(max_action)
else:
# If no break from the loop, issue a warning.
print("WARNING: rollout reached move limit")
return 1 if state.get_winner_color() == player else -1
def get_move(self, state):
"""Runs all playouts sequentially and returns the most visited action.
Arguments:
state -- the current state, including both game state and the current player.
Returns:
the selected action
"""
for n in range(self._n_playout):
state_copy = state.copy()
self._playout(state_copy, self._L)
# chosen action is the *most visited child*, not the highest-value one
# (they are the same as self._n_playout gets large).
        return max(self._root._children.items(), key=lambda act_node: act_node[1]._n_visits)[0]
def update_with_move(self, last_move):
"""Step forward in the tree, keeping everything we already know about the subtree, assuming
that get_move() has been called already. Siblings of the new root will be garbage-collected.
"""
if last_move in self._root._children:
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
class ParallelMCTS(MCTS):
pass
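# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): the minimal state interface
# MCTS expects -- copy(), do_move(), get_current_player(), get_winner_color()
# -- wired to a made-up toy game. ToyState and the uniform policy/value
# functions below are hypothetical stand-ins, not AlphaGo components.
if __name__ == '__main__':

    class ToyState(object):
        """Race to position 5; players alternate adding 1 or 2."""

        def __init__(self, pos=0, player=1):
            self.pos, self.player = pos, player

        def copy(self):
            return ToyState(self.pos, self.player)

        def do_move(self, action):
            self.pos += action
            self.player = -self.player

        def get_current_player(self):
            return self.player

        def get_winner_color(self):
            # The player who just reached (or passed) 5 is the previous mover.
            return -self.player

        def _legal_moves(self):
            return [] if self.pos >= 5 else [1, 2]

    def policy_fn(state):
        # Uniform prior over legal moves; an empty list signals game over.
        moves = state._legal_moves()
        return [(m, 1.0 / len(moves)) for m in moves]

    def value_fn(state):
        return 0.0  # uninformative value-network stand-in

    mcts = MCTS(value_fn, policy_fn, policy_fn, lmbda=0.5, n_playout=200)
    print("suggested opening move:", mcts.get_move(ToyState()))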
|
Rochester-NRT/RocAlphaGo
|
AlphaGo/mcts.py
|
Python
|
mit
| 9,222
|
[
"VisIt"
] |
445d038ca24b42d5f2853214800de05c78261b849a312862c0874ccc5b707a27
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DirectRunner, executing on the local machine.
The DirectRunner is a runner implementation that executes the entire
graph of transformations belonging to a pipeline on the local machine.
"""
from __future__ import absolute_import
import itertools
import logging
import time
from google.protobuf import wrappers_pb2
import apache_beam as beam
from apache_beam import coders
from apache_beam import typehints
from apache_beam.internal.util import ArgumentPlaceholder
from apache_beam.options.pipeline_options import DirectOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.pvalue import PCollection
from apache_beam.runners.direct.bundle_factory import BundleFactory
from apache_beam.runners.direct.clock import RealClock
from apache_beam.runners.direct.clock import TestClock
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.transforms.core import CombinePerKey
from apache_beam.transforms.core import CombineValuesDoFn
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import _GroupAlsoByWindow
from apache_beam.transforms.core import _GroupAlsoByWindowDoFn
from apache_beam.transforms.core import _GroupByKeyOnly
from apache_beam.transforms.ptransform import PTransform
# Note that the BundleBasedDirectRunner and SwitchingDirectRunner names are
# experimental and have no backwards compatibility guarantees.
__all__ = ['BundleBasedDirectRunner',
'DirectRunner',
'SwitchingDirectRunner']
class SwitchingDirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine.
This implementation switches between using the FnApiRunner (which has
high throughput for batch jobs) and using the BundleBasedDirectRunner,
which supports streaming execution and certain primitives not yet
implemented in the FnApiRunner.
"""
def run_pipeline(self, pipeline):
use_fnapi_runner = True
# Streaming mode is not yet supported on the FnApiRunner.
if pipeline._options.view_as(StandardOptions).streaming:
use_fnapi_runner = False
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.runners.dataflow.native_io.iobase import _NativeWrite
from apache_beam.testing.test_stream import TestStream
class _FnApiRunnerSupportVisitor(PipelineVisitor):
"""Visitor determining if a Pipeline can be run on the FnApiRunner."""
def accept(self, pipeline):
self.supported_by_fnapi_runner = True
pipeline.visit(self)
return self.supported_by_fnapi_runner
def visit_transform(self, applied_ptransform):
transform = applied_ptransform.transform
# The FnApiRunner does not support streaming execution.
if isinstance(transform, TestStream):
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support reads from NativeSources.
if (isinstance(transform, beam.io.Read) and
isinstance(transform.source, NativeSource)):
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support the use of _NativeWrites.
if isinstance(transform, _NativeWrite):
self.supported_by_fnapi_runner = False
if isinstance(transform, beam.ParDo):
dofn = transform.dofn
# The FnApiRunner does not support execution of SplittableDoFns.
if DoFnSignature(dofn).is_splittable_dofn():
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support execution of DoFns with timers.
if DoFnSignature(dofn).has_timers():
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support execution of CombineFns with
# deferred side inputs.
if isinstance(dofn, CombineValuesDoFn):
args, kwargs = transform.raw_side_inputs
args_to_check = itertools.chain(args,
kwargs.values())
if any(isinstance(arg, ArgumentPlaceholder)
for arg in args_to_check):
self.supported_by_fnapi_runner = False
    # Check whether all transforms used in the pipeline are supported by the
    # FnApiRunner, in addition to the streaming check above.
    use_fnapi_runner = (
        use_fnapi_runner and _FnApiRunnerSupportVisitor().accept(pipeline))
# Also ensure grpc is available.
try:
# pylint: disable=unused-variable
import grpc
except ImportError:
use_fnapi_runner = False
if use_fnapi_runner:
from apache_beam.runners.portability.fn_api_runner import FnApiRunner
runner = FnApiRunner()
else:
runner = BundleBasedDirectRunner()
return runner.run_pipeline(pipeline)
# Type variables.
K = typehints.TypeVariable('K')
V = typehints.TypeVariable('V')
@typehints.with_input_types(typehints.KV[K, V])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class _StreamingGroupByKeyOnly(_GroupByKeyOnly):
"""Streaming GroupByKeyOnly placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gbko:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, unused_context):
return _StreamingGroupByKeyOnly.urn, None
@PTransform.register_urn(urn, None)
def from_runner_api_parameter(unused_payload, unused_context):
return _StreamingGroupByKeyOnly()
@typehints.with_input_types(typehints.KV[K, typehints.Iterable[V]])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class _StreamingGroupAlsoByWindow(_GroupAlsoByWindow):
"""Streaming GroupAlsoByWindow placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gabw:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, context):
return (
_StreamingGroupAlsoByWindow.urn,
wrappers_pb2.BytesValue(value=context.windowing_strategies.get_id(
self.windowing)))
@PTransform.register_urn(urn, wrappers_pb2.BytesValue)
def from_runner_api_parameter(payload, context):
return _StreamingGroupAlsoByWindow(
context.windowing_strategies.get_by_id(payload.value))
def _get_transform_overrides(pipeline_options):
# A list of PTransformOverride objects to be applied before running a pipeline
# using DirectRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal use only; no backwards-compatibility guarantees.
# Importing following locally to avoid a circular dependency.
from apache_beam.pipeline import PTransformOverride
from apache_beam.runners.sdf_common import SplittableParDoOverride
from apache_beam.runners.direct.helper_transforms import LiftedCombinePerKey
from apache_beam.runners.direct.sdf_direct_runner import ProcessKeyedElementsViaKeyedWorkItemsOverride
class CombinePerKeyOverride(PTransformOverride):
def matches(self, applied_ptransform):
if isinstance(applied_ptransform.transform, CombinePerKey):
return True
def get_replacement_transform(self, transform):
      # TODO: Move imports to top. The Pipeline <-> Runner dependency causes
      # problems with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
try:
return LiftedCombinePerKey(transform.fn, transform.args,
transform.kwargs)
except NotImplementedError:
return transform
class StreamingGroupByKeyOverride(PTransformOverride):
def matches(self, applied_ptransform):
# Note: we match the exact class, since we replace it with a subclass.
return applied_ptransform.transform.__class__ == _GroupByKeyOnly
def get_replacement_transform(self, transform):
# Use specialized streaming implementation.
transform = _StreamingGroupByKeyOnly()
return transform
class StreamingGroupAlsoByWindowOverride(PTransformOverride):
def matches(self, applied_ptransform):
# Note: we match the exact class, since we replace it with a subclass.
transform = applied_ptransform.transform
return (isinstance(applied_ptransform.transform, ParDo) and
isinstance(transform.dofn, _GroupAlsoByWindowDoFn) and
transform.__class__ != _StreamingGroupAlsoByWindow)
def get_replacement_transform(self, transform):
# Use specialized streaming implementation.
transform = _StreamingGroupAlsoByWindow(transform.dofn.windowing)
return transform
overrides = [SplittableParDoOverride(),
ProcessKeyedElementsViaKeyedWorkItemsOverride(),
CombinePerKeyOverride()]
# Add streaming overrides, if necessary.
if pipeline_options.view_as(StandardOptions).streaming:
overrides.append(StreamingGroupByKeyOverride())
overrides.append(StreamingGroupAlsoByWindowOverride())
# Add PubSub overrides, if PubSub is available.
try:
from apache_beam.io.gcp import pubsub as unused_pubsub
overrides += _get_pubsub_transform_overrides(pipeline_options)
except ImportError:
pass
return overrides
class _DirectReadFromPubSub(PTransform):
def __init__(self, source):
self._source = source
def _infer_output_coder(self, unused_input_type=None,
unused_input_coder=None):
return coders.BytesCoder()
def get_windowing(self, inputs):
return beam.Windowing(beam.window.GlobalWindows())
def expand(self, pvalue):
# This is handled as a native transform.
return PCollection(self.pipeline)
class _DirectWriteToPubSubFn(DoFn):
BUFFER_SIZE_ELEMENTS = 100
FLUSH_TIMEOUT_SECS = BUFFER_SIZE_ELEMENTS * 0.5
def __init__(self, sink):
self.project = sink.project
self.short_topic_name = sink.topic_name
self.id_label = sink.id_label
self.timestamp_attribute = sink.timestamp_attribute
self.with_attributes = sink.with_attributes
# TODO(BEAM-4275): Add support for id_label and timestamp_attribute.
if sink.id_label:
raise NotImplementedError('DirectRunner: id_label is not supported for '
'PubSub writes')
if sink.timestamp_attribute:
raise NotImplementedError('DirectRunner: timestamp_attribute is not '
'supported for PubSub writes')
def start_bundle(self):
self._buffer = []
def process(self, elem):
self._buffer.append(elem)
if len(self._buffer) >= self.BUFFER_SIZE_ELEMENTS:
self._flush()
def finish_bundle(self):
self._flush()
def _flush(self):
from google.cloud import pubsub
pub_client = pubsub.PublisherClient()
topic = pub_client.topic_path(self.project, self.short_topic_name)
if self.with_attributes:
futures = [pub_client.publish(topic, elem.data, **elem.attributes)
for elem in self._buffer]
else:
futures = [pub_client.publish(topic, elem)
for elem in self._buffer]
timer_start = time.time()
for future in futures:
remaining = self.FLUSH_TIMEOUT_SECS - (time.time() - timer_start)
future.result(remaining)
self._buffer = []
def _get_pubsub_transform_overrides(pipeline_options):
from apache_beam.io.gcp import pubsub as beam_pubsub
from apache_beam.pipeline import PTransformOverride
class ReadFromPubSubOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(applied_ptransform.transform,
beam_pubsub.ReadFromPubSub)
def get_replacement_transform(self, transform):
if not pipeline_options.view_as(StandardOptions).streaming:
raise Exception('PubSub I/O is only available in streaming mode '
'(use the --streaming flag).')
return _DirectReadFromPubSub(transform._source)
class WriteToPubSubOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(
applied_ptransform.transform,
(beam_pubsub.WriteToPubSub, beam_pubsub._WriteStringsToPubSub))
def get_replacement_transform(self, transform):
if not pipeline_options.view_as(StandardOptions).streaming:
raise Exception('PubSub I/O is only available in streaming mode '
'(use the --streaming flag).')
return beam.ParDo(_DirectWriteToPubSubFn(transform._sink))
return [ReadFromPubSubOverride(), WriteToPubSubOverride()]
class BundleBasedDirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine."""
def run_pipeline(self, pipeline):
"""Execute the entire pipeline and returns an DirectPipelineResult."""
    # TODO: Move imports to top. The Pipeline <-> Runner dependency causes
    # problems with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import \
ConsumerTrackingPipelineVisitor
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.executor import Executor
from apache_beam.runners.direct.transform_evaluator import \
TransformEvaluatorRegistry
from apache_beam.testing.test_stream import TestStream
# Performing configured PTransform overrides.
pipeline.replace_all(_get_transform_overrides(pipeline.options))
# If the TestStream I/O is used, use a mock test clock.
class _TestStreamUsageVisitor(PipelineVisitor):
"""Visitor determining whether a Pipeline uses a TestStream."""
def __init__(self):
self.uses_test_stream = False
def visit_transform(self, applied_ptransform):
if isinstance(applied_ptransform.transform, TestStream):
self.uses_test_stream = True
visitor = _TestStreamUsageVisitor()
pipeline.visit(visitor)
clock = TestClock() if visitor.uses_test_stream else RealClock()
# TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring.
from apache_beam.metrics.execution import MetricsEnvironment
MetricsEnvironment.set_metrics_supported(True)
logging.info('Running pipeline with DirectRunner.')
self.consumer_tracking_visitor = ConsumerTrackingPipelineVisitor()
pipeline.visit(self.consumer_tracking_visitor)
evaluation_context = EvaluationContext(
pipeline._options,
BundleFactory(stacked=pipeline._options.view_as(DirectOptions)
.direct_runner_use_stacked_bundle),
self.consumer_tracking_visitor.root_transforms,
self.consumer_tracking_visitor.value_to_consumers,
self.consumer_tracking_visitor.step_names,
self.consumer_tracking_visitor.views,
clock)
executor = Executor(self.consumer_tracking_visitor.value_to_consumers,
TransformEvaluatorRegistry(evaluation_context),
evaluation_context)
# DirectRunner does not support injecting
# PipelineOptions values at runtime
RuntimeValueProvider.set_runtime_options({})
    # Start the executor. This is a non-blocking call: it starts execution in
    # background threads and returns.
executor.start(self.consumer_tracking_visitor.root_transforms)
result = DirectPipelineResult(executor, evaluation_context)
return result
# Use the SwitchingDirectRunner as the default.
DirectRunner = SwitchingDirectRunner
class DirectPipelineResult(PipelineResult):
"""A DirectPipelineResult provides access to info about a pipeline."""
def __init__(self, executor, evaluation_context):
super(DirectPipelineResult, self).__init__(PipelineState.RUNNING)
self._executor = executor
self._evaluation_context = evaluation_context
def __del__(self):
if self._state == PipelineState.RUNNING:
logging.warning(
'The DirectPipelineResult is being garbage-collected while the '
'DirectRunner is still running the corresponding pipeline. This may '
'lead to incomplete execution of the pipeline if the main thread '
'exits before pipeline completion. Consider using '
'result.wait_until_finish() to wait for completion of pipeline '
'execution.')
def wait_until_finish(self, duration=None):
if not PipelineState.is_terminal(self.state):
if duration:
raise NotImplementedError(
'DirectRunner does not support duration argument.')
try:
self._executor.await_completion()
self._state = PipelineState.DONE
except: # pylint: disable=broad-except
self._state = PipelineState.FAILED
raise
return self._state
def aggregated_values(self, aggregator_or_name):
return self._evaluation_context.get_aggregator_values(aggregator_or_name)
def metrics(self):
return self._evaluation_context.metrics()
def cancel(self):
"""Shuts down pipeline workers.
For testing use only. Does not properly wait for pipeline workers to shut
down.
"""
self._state = PipelineState.CANCELLING
self._executor.shutdown()
self._state = PipelineState.CANCELLED
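# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the runner): a minimal pipeline executed
# on the DirectRunner defined above. The element values are arbitrary; only
# the public beam Pipeline/Create/Map APIs are assumed.
if __name__ == '__main__':
    with beam.Pipeline(runner=DirectRunner()) as p:
        _ = (p
             | beam.Create([1, 2, 3])
             | beam.Map(lambda x: x * x)
             | beam.Map(logging.info))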
|
charlesccychen/beam
|
sdks/python/apache_beam/runners/direct/direct_runner.py
|
Python
|
apache-2.0
| 18,219
|
[
"VisIt"
] |
e1bafa31c952cc8da7ac5a1f01e2dd02b44f7715fb9f090e696c8c7c7518555c
|
# This file is part of cclib (http://cclib.sf.net), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public License version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
__revision__ = "$Revision$"
import numpy
import bettertest
class GenericCCTest(bettertest.TestCase):
"""Coupled cluster unittest."""
def testsign(self):
corrections = self.data.ccenergies - self.data.scfenergies
self.failUnless(numpy.alltrue(corrections < 0.0))
class GenericCCDTest(GenericCCTest):
"""CCD unittest."""
def testsign(self):
"""CCD: Are the Coupled cluster corrections negative?"""
super(GenericCCDTest, self).testsign()
class GenericCCSDTest(GenericCCTest):
"""CCSD unittest."""
def testsign(self):
"""CCSD: Are the Coupled cluster corrections negative?"""
super(GenericCCSDTest, self).testsign()
class GenericCCSDTTest(GenericCCTest):
"""CCSD(T) unittest."""
def testsign(self):
"""CCSD(T): Are the Coupled cluster correction negative?"""
super(GenericCCSDTTest, self).testsign()
class GAMESSUSCCDTest(GenericCCDTest):
"""GAMESS-US CCD unittest."""
old_tests = ["GAMESS/GAMESS-US/water_ccd_2005.06.27.r3.out.gz"]
class GAMESSUSCCSDTest(GenericCCSDTest):
"""GAMESS-US CCSD unittest."""
old_tests = ["GAMESS/GAMESS-US/water_ccsd_2005.06.27.r3.out.gz"]
class GAMESSUSCCSDTTest(GenericCCSDTTest):
"""GAMESS-US CCSD(T) unittest."""
old_tests = ["GAMESS/GAMESS-US/water_ccsd(t)_2005.06.27.r3.out.gz"]
class GaussianCCDTest(GenericCCDTest):
"""Gaussian CCD unittest."""
class GaussianCCSDTest(GenericCCSDTest):
"""Gaussian CCSD unittest."""
class GaussianCCSDTTest(GenericCCSDTTest):
"""Gaussian CCSD(T) unittest."""
class MolproCCDTest(GenericCCDTest):
"""Molpro CCD unittest."""
class MolproCCSDTest(GenericCCSDTest):
"""Molpro CCSD unittest."""
class MolproCCSDTTest(GenericCCSDTTest):
"""Molpro CCSD(T) unittest."""
if __name__ == "__main__":
from testall import testall
testall(modules=["CC"])
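# ---------------------------------------------------------------------------
# Illustrative sketch (not part of cclib): covering another package amounts
# to one more subclass, optionally pointing at archived logfiles. "NWChem"
# and the path in `old_tests` are hypothetical placeholders.
class NWChemCCSDTest(GenericCCSDTest):
    """NWChem CCSD unittest (hypothetical example)."""
    old_tests = ["NWChem/water_ccsd.out.gz"]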
|
Clyde-fare/cclib_bak
|
test/testCC.py
|
Python
|
lgpl-2.1
| 2,406
|
[
"GAMESS",
"Gaussian",
"Molpro",
"cclib"
] |
9a126efad048f7e2ae2a51f71c5ffe06c7b590fa67f48c3f8d13487bb1f21892
|
#!/usr/bin/python
#
# CCLib_proxy Utilities - BlueGiga Specific
# Copyright (c) 2014 Ioannis Charalampidis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
from cclib import CCHEXFile, getOptions, openCCDebugger
from cclib.extensions.bluegiga import BlueGigaCCDebugger
import sys
# Get serial port either from environment or from arguments
opts = getOptions("BlueGiga-Specific CCDebugger Flash Writer Tool", hexIn=True,
license=":A 32-byte, hex representation of the license key (64 characters)",
addr=":A bluetooth mac address in XX:XX:XX:XX:XX:XX format",
ver=":A decimal number that defines the hardware version",
erase="Full chip erase before write",
offset=":Offset the addresses in the .hex file by this value")
# Open debugger
try:
dbg = openCCDebugger(opts['port'], enterDebug=opts['enter'], driver=BlueGigaCCDebugger)
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(1)
# Get offset
offset = 0
if opts['offset']:
if opts['offset'][0:2] == "0x":
offset = int(opts['offset'], 16)
else:
offset = int(opts['offset'])
print("NOTE: The memory addresses are offset by %i bytes!" % offset)
# Get bluegiga-specific info
binfo = dbg.getBLEInfo()
serial = dbg.getSerial()
# Check if we have missing license
btaMessage=""
hwvMessage=""
licMessage=""
hasLicense = False
for x in binfo['license']:
if x != "f":
hasLicense = True
break
if not hasLicense:
if opts['license'] is None:
print("ERROR: Your device has no license key")
print("ERROR: You must specify a license key from the command line!")
sys.exit(5)
else:
licKey = opts['license']
if len(licKey) != 64:
print("ERROR: Invalid license key specified!")
sys.exit(5)
else:
licMessage = "(From command-line)"
binfo['license'] = licKey
if opts['addr'] is None:
if not hasLicense:
binfo['btaddr'] = "".join([ "%s:" % serial[x:x+2] for x in range(0,len(serial),2) ])[0:-1]
btaMessage = " (Generated using IEEE address)"
else:
if len(opts['addr']) != 17:
print("ERROR: Invalid BT Address specified!")
sys.exit(5)
btaMessage = "(From command-line)"
binfo['btaddr'] = opts['addr']
# Reset Hardware Version
if opts['ver'] is None:
if not hasLicense:
binfo['hwver'] = 0x01
else:
hwvMessage = "(From command-line)"
binfo['hwver'] = int(opts['ver'])
# Print collected license information
print("\nLicense information:")
print(" IEEE Address : %s" % serial)
print(" H/W Version : %02x" % binfo['hwver'], hwvMessage)
print(" BT Address : %s" % binfo['btaddr'], btaMessage)
print(" License : %s" % binfo['license'], licMessage)
print("")
# Parse the HEX file
hexFile = CCHEXFile( opts['in'] )
hexFile.load()
# Display sections & calculate max memory usage
maxMem = 0
print("Sections in %s:\n" % opts['in'])
print(" Addr. Size")
print("-------- -------------")
for mb in hexFile.memBlocks:
# Calculate top position
memTop = mb.addr + mb.size
if memTop > maxMem:
maxMem = memTop
# Print portion
print(" 0x%04x %i B " % (mb.addr + offset, mb.size))
print("")
# Check for oversize data
if maxMem > (dbg.chipInfo['flash'] * 1024):
print("ERROR: Data too bit to fit in chip's memory!")
sys.exit(4)
# Update BLE information on the file
hexFile.set( dbg.flashSize-57, [ int(binfo['license'][x:x+2],16) for x in range(0,len(binfo['license']),2) ] )
hexFile.set( dbg.flashSize-25, [ binfo['hwver'] ])
hexFile.set( dbg.flashSize-22, [ int(binfo['btaddr'][x:x+2],16) for x in range(0,len(binfo['btaddr']),3) ] )
# Confirm
erasePrompt = "OVERWRITE"
if opts['erase']:
erasePrompt = "ERASE and REPROGRAM"
print("This is going to %s the chip. Are you sure? <y/N>: " % erasePrompt, end=' ')
ans = sys.stdin.readline()[0:-1]
if (ans != "y") and (ans != "Y"):
print("Aborted")
sys.exit(2)
# Get BLE info page
print("\nFlashing:")
# Check for PStore
pssize = dbg.getBLEPStoreSize()
if pssize > 0:
print(" - Backing-up PS Store (%i Bytes)..." % pssize)
pstoreData = dbg.readCODE( 0x18000, pssize )
hexFile.set( 0x18000, pstoreData )
# Send chip erase
if opts['erase']:
print(" - Chip erase...")
try:
dbg.chipErase()
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(3)
# Flash memory
dbg.pauseDMA(False)
print(" - Flashing %i memory blocks..." % len(hexFile.memBlocks))
for mb in hexFile.memBlocks:
# Flash memory block
print(" -> 0x%04x : %i bytes " % (mb.addr + offset, mb.size))
try:
dbg.writeCODE( mb.addr + offset, mb.bytes, verify=True, showProgress=True )
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(3)
# Done
print("\nCompleted")
print("")
|
wavesoft/CCLib
|
Python/ble_write_flash.py
|
Python
|
gpl-3.0
| 5,198
|
[
"cclib"
] |
1daffac84d22243c66997f9a15c90e486b29be385e70206a5c908657b97876d3
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import os
import sys
import subprocess
import unittest
import platform
import tempfile
import shutil
from importlib import import_module
from glob import glob
from os import devnull
#
#
#
class NotAvailable(Exception):
pass
#
#
#
class ScriptTestCase(unittest.TestCase):
def __init__(self, methodname='testfile', filename=None):
unittest.TestCase.__init__(self, methodname)
self.filename = filename
def testfile(self):
try:
with open(self.filename) as fd:
exec(compile(fd.read(), self.filename, 'exec'), {})
except KeyboardInterrupt:
raise RuntimeError('Keyboard interrupt')
except ImportError as ex:
module = ex.args[0].split()[-1].replace("'", '').split('.')[0]
if module in ['scipy', 'matplotlib', 'Scientific', 'lxml',
'flask', 'argparse']:
sys.__stdout__.write('skipped (no {0} module) '.format(module))
else:
raise
except NotAvailable as notavailable:
sys.__stdout__.write('skipped ')
msg = str(notavailable)
if msg:
sys.__stdout__.write('({0}) '.format(msg))
def id(self):
return self.filename
def __str__(self):
return self.filename.split('test/')[-1]
def __repr__(self):
return "ScriptTestCase(filename='%s')" % self.filename
#
#
#
def test(verbosity=1, testdir=None, stream=sys.stdout, files=None, siesta_exe='siesta'):
"""
files :
"""
ts = unittest.TestSuite()
if files:
files = [os.path.join(__path__[0], f) for f in files]
else:
files = glob(__path__[0] + '/*')
sdirtests = []
tests = []
    # look for test files in sub dirs
    for f in files:
        # first level sub dir
if os.path.isdir(f):
files_sub = glob(f+'/*')
sdirtests.extend(glob(f + '/*.py'))
# second level sub dir
for fsub in files_sub:
if os.path.isdir(fsub):
sdirtests.extend(glob(fsub + '/*.py'))
else:
if fsub.endswith('.py'):
tests.append(fsub)
else:
if f.endswith('.py'):
tests.append(f)
for test in tests + sdirtests:
if test.endswith('__.py'):
continue
ts.addTest(ScriptTestCase(filename=os.path.abspath(test)))
versions = [('platform', platform.platform()),
('python-' + sys.version.split()[0], sys.executable)]
for name in ['pyscf', 'numpy', 'scipy']:
try:
module = import_module(name)
except ImportError:
versions.append((name, 'no'))
else:
versions.append((name + '-' + module.__version__,
module.__file__.rsplit('/', 1)[0] + '/'))
if verbosity:
for a, b in versions:
print('{0:16}{1}'.format(a, b))
sys.stdout = open(devnull, 'w')
if verbosity == 0:
stream = open(devnull, 'w')
ttr = unittest.TextTestRunner(verbosity=verbosity, stream=stream)
origcwd = os.getcwd()
if testdir is None:
testdir = tempfile.mkdtemp(prefix='pyscf-test-')
else:
if os.path.isdir(testdir):
shutil.rmtree(testdir) # clean before running tests!
os.mkdir(testdir)
os.chdir(testdir)
if verbosity:
print('test-dir ', testdir, '\n', file=sys.__stdout__)
try:
results = ttr.run(ts)
finally:
os.chdir(origcwd)
sys.stdout = sys.__stdout__
return results
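# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the harness): running the suite with the
# arguments test() defines above; a throwaway testdir is created when none
# is given.
if __name__ == '__main__':
    results = test(verbosity=2)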
|
gkc1000/pyscf
|
pyscf/nao/test/__init__.py
|
Python
|
apache-2.0
| 4,319
|
[
"PySCF",
"SIESTA"
] |
f5f42d17ff6e1f101f59b7c65272b24b661cda9d52c4721f0ee15c35fe41afa4
|
from builtins import object
###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
import re
from slimit import ast
from slimit.lexer import Lexer
_HAS_ID_MATCH = re.compile('^%s$' % Lexer.identifier).match
def _is_identifier(value):
return _HAS_ID_MATCH(value) and value not in Lexer.keywords_dict
class ECMAMinifier(object):
def __init__(self):
self.in_block = 0
self.ifelse_stack = []
def visit(self, node):
method = 'visit_%s' % node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
return 'GEN: %r' % node
def visit_Program(self, node):
return ''.join(self.visit(child) for child in node)
def visit_Block(self, node):
children = [self.visit(child) for child in node]
if len(children) == 1:
return children[0]
else:
return '{%s}' % ''.join(children)
def visit_VarStatement(self, node):
s = 'var %s;' % ','.join(self.visit(child) for child in node)
return s
def visit_VarDecl(self, node):
output = []
output.append(self.visit(node.identifier))
if node.initializer is not None:
output.append('=%s' % self.visit(node.initializer))
return ''.join(output)
def visit_Identifier(self, node):
return node.value
def visit_Assign(self, node):
template = '%s%s%s'
if getattr(node, '_parens', False):
template = '(%s)' % template
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_GetPropAssign(self, node):
template = 'get %s(){%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
return template % (
self.visit(node.prop_name),
''.join(self.visit(element) for element in node.elements)
)
def visit_SetPropAssign(self, node):
template = 'set %s(%s){%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
if len(node.parameters) > 1:
raise SyntaxError(
'Setter functions must have one argument: %s' % node)
return template % (
self.visit(node.prop_name),
''.join(self.visit(param) for param in node.parameters),
''.join(self.visit(element) for element in node.elements)
)
def visit_Number(self, node):
return node.value
def visit_Comma(self, node):
template = '%s,%s'
if getattr(node, '_parens', False):
template = '(%s)' % template
return template % (self.visit(node.left), self.visit(node.right))
def visit_EmptyStatement(self, node):
return node.value
def visit_If(self, node):
has_alternative = node.alternative is not None
def _is_singleline_block(n):
return isinstance(n, ast.Block) and (len(n.children()) == 1)
s = 'if('
if node.predicate is not None:
s += self.visit(node.predicate)
s += ')'
# if we are an 'if..else' statement and 'if' part contains only
# one statement
if has_alternative and _is_singleline_block(node.consequent):
self.ifelse_stack.append({'if_in_ifelse': False})
consequent = self.visit(node.consequent)
record = self.ifelse_stack.pop()
if record['if_in_ifelse']:
s += '{%s}' % consequent
else:
s += consequent
elif has_alternative:
# we are an 'if..else' statement and 'if' part contains
            # multiple statements
s += self.visit(node.consequent)
else:
# 'if' without alternative - mark it so that an enclosing
# 'if..else' can act on it and add braces around 'if' part
if self.ifelse_stack:
self.ifelse_stack[-1]['if_in_ifelse'] = True
s += self.visit(node.consequent)
if has_alternative:
alternative = self.visit(node.alternative)
if alternative.startswith(('(', '{')):
s += 'else%s' % alternative
else:
s += 'else %s' % alternative
return s
def visit_Boolean(self, node):
return node.value
def visit_For(self, node):
s = 'for('
if node.init is not None:
s += self.visit(node.init)
if node.init is None:
s += ';'
elif isinstance(node.init, (ast.Assign, ast.Comma, ast.Conditional,
ast.FunctionCall, ast.UnaryOp,
ast.Identifier)):
s += ';'
else:
s += ''
if node.cond is not None:
s += self.visit(node.cond)
s += ';'
if node.count is not None:
s += self.visit(node.count)
s += ')' + self.visit(node.statement)
return s
def visit_ForIn(self, node):
if isinstance(node.item, ast.VarDecl):
template = 'for(var %s in %s)'
else:
template = 'for(%s in %s)'
s = template % (self.visit(node.item), self.visit(node.iterable))
s += self.visit(node.statement)
return s
def visit_BinOp(self, node):
if node.op in ('instanceof', 'in'):
template = '%s %s %s'
elif (node.op == '+' and
isinstance(node.right, ast.UnaryOp) and
node.right.op == '++' and not node.right.postfix
):
# make a space between + and ++
# https://github.com/rspivak/slimit/issues/26
template = '%s%s %s'
else:
template = '%s%s%s'
if getattr(node, '_parens', False):
template = '(%s)' % template
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_UnaryOp(self, node):
s = self.visit(node.value)
if node.postfix:
s += node.op
elif node.op in ('delete', 'void', 'typeof'):
s = '%s %s' % (node.op, s)
else:
s = '%s%s' % (node.op, s)
if getattr(node, '_parens', False):
s = '(%s)' % s
return s
def visit_ExprStatement(self, node):
return '%s;' % self.visit(node.expr)
def visit_DoWhile(self, node):
statement = self.visit(node.statement)
if statement.startswith(('{', '(')):
s = 'do%s' % statement
else:
s = 'do %s' % statement
s += 'while(%s);' % self.visit(node.predicate)
return s
def visit_While(self, node):
s = 'while(%s)' % self.visit(node.predicate)
s += self.visit(node.statement)
return s
def visit_Null(self, node):
return 'null'
def visit_String(self, node):
return node.value
def visit_Continue(self, node):
if node.identifier is not None:
s = 'continue %s;' % self.visit_Identifier(node.identifier)
else:
s = 'continue;'
return s
def visit_Break(self, node):
if node.identifier is not None:
s = 'break %s;' % self.visit_Identifier(node.identifier)
else:
s = 'break;'
return s
def visit_Return(self, node):
if node.expr is None:
return 'return;'
expr_text = self.visit(node.expr)
if expr_text.startswith(('(', '{')):
return 'return%s;' % expr_text
else:
return 'return %s;' % expr_text
def visit_With(self, node):
s = 'with(%s)' % self.visit(node.expr)
s += self.visit(node.statement)
return s
def visit_Label(self, node):
s = '%s:%s' % (
self.visit(node.identifier), self.visit(node.statement))
return s
def visit_Switch(self, node):
s = 'switch(%s){' % self.visit(node.expr)
for case in node.cases:
s += self.visit_Case(case)
if node.default is not None:
s += self.visit_Default(node.default)
s += '}'
return s
def visit_Case(self, node):
s = 'case %s:' % self.visit(node.expr)
elements = ''.join(self.visit(element) for element in node.elements)
if elements:
s += elements
return s
def visit_Default(self, node):
s = 'default:'
s += ''.join(self.visit(element) for element in node.elements)
if node.elements is not None:
s += ''
return s
def visit_Throw(self, node):
s = 'throw %s;' % self.visit(node.expr)
return s
def visit_Debugger(self, node):
return '%s;' % node.value
def visit_Try(self, node):
result = self.visit(node.statements)
if result.startswith('{'):
s = 'try%s' % result
else:
s = 'try{%s}' % result
if node.catch is not None:
s += self.visit(node.catch)
if node.fin is not None:
s += self.visit(node.fin)
return s
def visit_Catch(self, node):
ident = self.visit(node.identifier)
result = self.visit(node.elements)
if result.startswith('{'):
s = 'catch(%s)%s' % (ident, result)
else:
s = 'catch(%s){%s}' % (ident, result)
return s
def visit_Finally(self, node):
result = self.visit(node.elements)
if result.startswith('{'):
s = 'finally%s' % result
else:
s = 'finally{%s}' % result
return s
def visit_FuncDecl(self, node):
elements = ''.join(self.visit(element) for element in node.elements)
s = 'function %s(%s){%s' % (
self.visit(node.identifier),
','.join(self.visit(param) for param in node.parameters),
elements,
)
s += '}'
return s
def visit_FuncExpr(self, node):
elements = ''.join(self.visit(element) for element in node.elements)
ident = node.identifier
ident = '' if ident is None else ' %s' % self.visit(ident)
header = 'function%s(%s)'
if getattr(node, '_parens', False):
header = '(' + header
s = (header + '{%s') % (
ident,
','.join(self.visit(param) for param in node.parameters),
elements,
)
s += '}'
if getattr(node, '_parens', False):
s += ')'
return s
def visit_Conditional(self, node):
if getattr(node, '_parens', False):
template = '(%s?%s:%s)'
else:
template = '%s?%s:%s'
s = template % (
self.visit(node.predicate),
self.visit(node.consequent), self.visit(node.alternative))
return s
def visit_Regex(self, node):
if getattr(node, '_parens', False):
return '(%s)' % node.value
else:
return node.value
def visit_NewExpr(self, node):
s = 'new %s(%s)' % (
self.visit(node.identifier),
','.join(self.visit(arg) for arg in node.args)
)
return s
def visit_DotAccessor(self, node):
if getattr(node, '_parens', False):
template = '(%s.%s)'
else:
template = '%s.%s'
s = template % (self.visit(node.node), self.visit(node.identifier))
return s
def visit_BracketAccessor(self, node):
if isinstance(node.expr, ast.String):
value = node.expr.value
# remove single or double quotes around the value, but not both
if value.startswith("'"):
value = value.strip("'")
elif value.startswith('"'):
value = value.strip('"')
if _is_identifier(value):
s = '%s.%s' % (self.visit(node.node), value)
return s
s = '%s[%s]' % (self.visit(node.node), self.visit(node.expr))
return s
def visit_FunctionCall(self, node):
template = '%s(%s)'
if getattr(node, '_parens', False):
template = '(%s)' % template
s = template % (self.visit(node.identifier),
','.join(self.visit(arg) for arg in node.args))
return s
def visit_Object(self, node):
s = '{%s}' % ','.join(self.visit(prop) for prop in node.properties)
return s
def visit_Array(self, node):
s = '['
length = len(node.items) - 1
for index, item in enumerate(node.items):
if isinstance(item, ast.Elision):
s += ','
elif index != length:
s += self.visit(item) + ','
else:
s += self.visit(item)
s += ']'
return s
def visit_This(self, node):
return 'this'
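# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the visitor): minifying a snippet with
# ECMAMinifier. Only slimit's public Parser is assumed.
if __name__ == '__main__':
    from slimit.parser import Parser

    tree = Parser().parse('var answer = 40 + 2; if (answer) { alert(answer); }')
    print(ECMAMinifier().visit(tree))
    # expected: var answer=40+2;if(answer)alert(answer);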
|
mdiener/grace
|
grace/py27/slimit/visitors/minvisitor.py
|
Python
|
gpl-3.0
| 14,207
|
[
"VisIt"
] |
26f413206cd0b5dd8b92e95dd7aab93efdcd03a1f62e0cfc824ca15f4a1d360c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
def upgradeCheck(url):
# upgrade check:
# -------------
# On each startup OMERO.web checks for possible server upgrades
# and logs the upgrade url at the WARNING level. If you would
# like to disable the checks, please set 'omero.web.upgrades_url`
# to an empty string.
#
# For more information, see
# https://docs.openmicroscopy.org/latest/omero/sysadmins/UpgradeCheck.html
#
try:
from omero.util.upgrade_check import UpgradeCheck
if url:
check = UpgradeCheck("web", url=url)
check.run()
if check.isUpgradeNeeded():
logger.warn(
"Upgrade is available. Please visit"
" https://downloads.openmicroscopy.org/latest/omero/.\n")
else:
logger.debug("Up to date.\n")
    except Exception as x:
logger.error("Upgrade check error: %s" % x)
|
knabar/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webadmin/webadmin_utils.py
|
Python
|
gpl-2.0
| 1,019
|
[
"VisIt"
] |
3741c614cdb4f4678125a2650af30706f69d46a1b762ae035488467b8d861060
|
import mdtraj as md
import numpy as np
from . import Featurizer, TrajFeatureUnion
class BaseSubsetFeaturizer(Featurizer):
"""Base class for featurizers that have a subset of active features.
n_features refers to the number of active features. n_max refers to the
number of possible features.
Parameters
----------
reference_traj : mdtraj.Trajectory
Reference Trajectory for checking consistency
subset : np.ndarray, default=None, dtype=int
        The values in subset specify which of all possible features are enabled.
Notes
-----
As an example, suppose we have an instance that has `n_max` = 5. This
means that the possible features are subsets of [0, 1, 2, 3, 4]. One possible
    subset is then [0, 1, 3]. The allowed values of subset (i.e. up to
    `n_max` - 1) are determined by the subclass; for example, `n_max` might be
    the number of phi backbone angles.
"""
def __init__(self, reference_traj, subset=None):
self.reference_traj = reference_traj
if subset is not None:
self.subset = subset
else:
self.subset = np.zeros(0, 'int')
@property
def n_features(self):
return len(self.subset)
class SubsetAtomPairs(BaseSubsetFeaturizer):
"""Subset featurizer based on atom pair distances.
Parameters
----------
possible_pair_indices : np.ndarray, dtype=int, shape=(n_max, 2)
These are the possible atom indices to use for calculating interatomic
distances.
reference_traj : mdtraj.Trajectory
Reference Trajectory for checking consistency
subset : np.ndarray, default=None, dtype=int
The values in subset specify which of all possible features are
to be enabled. Specifically, atom pair distances are calculated
for the pairs `possible_pair_indices[subset]`
periodic : bool, optional, default=False
if True, use periodic boundary condition wrapping
exponent : float, optional, default=1.0
Use the distances to this power as the output feature.
See Also
--------
See `get_atompair_indices` for how one might generate acceptable atom pair
indices.
"""
def __init__(self, possible_pair_indices, reference_traj, subset=None, periodic=False, exponent=1.0):
super(SubsetAtomPairs, self).__init__(reference_traj, subset=subset)
self.possible_pair_indices = possible_pair_indices
self.periodic = periodic
self.exponent = exponent
if subset is None:
self.subset = np.zeros(0, 'int')
else:
self.subset = subset
@property
def n_max(self):
return len(self.possible_pair_indices)
def partial_transform(self, traj):
if self.n_features > 0:
features = md.geometry.compute_distances(traj, self.pair_indices, periodic=self.periodic) ** self.exponent
else:
features = np.zeros((traj.n_frames, 0))
return features
@property
def pair_indices(self):
return self.possible_pair_indices[self.subset]
class SubsetTrigFeaturizer(BaseSubsetFeaturizer):
"""Base class for featurizer based on dihedral sine or cosine.
Notes
-----
Subsets must be a subset of 0, ..., n_max - 1, where n_max is determined
by the number of respective phi / psi dihedrals in your protein, as
    calculated by mdtraj.compute_phi and mdtraj.compute_psi
"""
def partial_transform(self, traj):
if self.n_features > 0:
dih = md.geometry.dihedral.compute_dihedrals(traj, self.which_atom_ind[self.subset])
features = self.trig_function(dih)
else:
features = np.zeros((traj.n_frames, 0))
return features
@property
def n_max(self):
return len(self.which_atom_ind)
class CosMixin(object):
def trig_function(self, dihedrals):
return np.cos(dihedrals)
class SinMixin(object):
def trig_function(self, dihedrals):
return np.sin(dihedrals)
class PhiMixin(object):
@property
def which_atom_ind(self):
atom_indices, dih = md.geometry.dihedral.compute_phi(self.reference_traj)
return atom_indices
class PsiMixin(object):
@property
def which_atom_ind(self):
atom_indices, dih = md.geometry.dihedral.compute_psi(self.reference_traj)
return atom_indices
class SubsetCosPhiFeaturizer(SubsetTrigFeaturizer, CosMixin, PhiMixin):
pass
class SubsetCosPsiFeaturizer(SubsetTrigFeaturizer, CosMixin, PsiMixin):
pass
class SubsetSinPhiFeaturizer(SubsetTrigFeaturizer, SinMixin, PhiMixin):
pass
class SubsetSinPsiFeaturizer(SubsetTrigFeaturizer, SinMixin, PsiMixin):
pass
class SubsetFeatureUnion(TrajFeatureUnion):
"""Mixtape version of sklearn.pipeline.FeatureUnion with feature subset selection.
Notes
-----
Works on lists of trajectories.
Has a hacky convenience method to set all subsets at once.
"""
@property
def subsets(self):
return [featurizer.subset for (_, featurizer) in self.transformer_list]
@subsets.setter
def subsets(self, value):
assert len(value) == len(self.transformer_list), "wrong len"
for k, (_, featurizer) in enumerate(self.transformer_list):
featurizer.subset = value[k]
@property
def n_max_i(self):
return np.array([featurizer.n_max for (_, featurizer) in self.transformer_list])
@property
def n_features_i(self):
return np.array([featurizer.n_features for (_, featurizer) in self.transformer_list])
@property
def n_featurizers(self):
return len(self.transformer_list)
@property
def n_max(self):
return np.sum([featurizer.n_max for (_, featurizer) in self.transformer_list])
@property
def n_features(self):
return sum([featurizer.n_features for (_, featurizer) in self.transformer_list])
class DummyCV(object):
"""A cross-validation object that returns identical training and test sets."""
def __init__(self, n):
self.n = n
def __iter__(self):
yield np.arange(self.n), np.arange(self.n)
def __len__(self):
return self.n
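# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): activating two of four
# candidate atom-pair distances. 'reference.pdb' is a hypothetical file;
# any mdtraj-loadable structure with at least 4 atoms would do.
if __name__ == '__main__':
    ref = md.load('reference.pdb')
    candidate_pairs = np.array([[0, 1], [0, 2], [1, 2], [2, 3]])
    feat = SubsetAtomPairs(candidate_pairs, ref, subset=np.array([0, 2]))
    print(feat.n_max, feat.n_features)        # 4 candidates, 2 active
    print(feat.partial_transform(ref).shape)  # (n_frames, 2)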
|
stephenliu1989/msmbuilder
|
msmbuilder/featurizer/subset.py
|
Python
|
lgpl-2.1
| 6,212
|
[
"MDTraj"
] |
a22700b487d8b6c0eb9adf1e7ddfe5add8854f34ada8fda790b03394dd82498e
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Some helper functions
'''
import os, sys
import warnings
import tempfile
import functools
import itertools
import collections
import ctypes
import numpy
import h5py
from threading import Thread
from multiprocessing import Queue, Process
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
from pyscf.lib import param
from pyscf import __config__
if h5py.version.version[:4] == '2.2.':
sys.stderr.write('h5py-%s is found in your environment. '
                     'h5py-%s has a bug in threading mode.\n'
'Async-IO is disabled.\n' % ((h5py.version.version,)*2))
c_double_p = ctypes.POINTER(ctypes.c_double)
c_int_p = ctypes.POINTER(ctypes.c_int)
c_null_ptr = ctypes.POINTER(ctypes.c_void_p)
def load_library(libname):
try:
_loaderpath = os.path.dirname(__file__)
return numpy.ctypeslib.load_library(libname, _loaderpath)
except OSError:
from pyscf import __path__ as ext_modules
for path in ext_modules:
libpath = os.path.join(path, 'lib')
if os.path.isdir(libpath):
for files in os.listdir(libpath):
if files.startswith(libname):
return numpy.ctypeslib.load_library(libname, libpath)
raise
#FIXME: the standard resource module gives a wrong number when objects are released
# http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/#fn:1
# or use slow functions like memory_profiler._get_memory does
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
def current_memory():
'''Return the size of used memory and allocated virtual memory (in MB)'''
#import resource
#return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
if sys.platform.startswith('linux'):
with open("/proc/%s/statm" % os.getpid()) as f:
vms, rss = [int(x)*PAGESIZE for x in f.readline().split()[:2]]
return rss/1e6, vms/1e6
else:
return 0, 0
def num_threads(n=None):
    '''Set the number of OMP threads. If the argument is not specified, the
    function will return the total number of available OMP threads.
    It's recommended to call this function to set OMP threads rather than
    "os.environ['OMP_NUM_THREADS'] = str(n)". This is because environment
    variables like OMP_NUM_THREADS are read when a module is imported; they
    cannot be reset through os.environ after the module has been loaded.
Examples:
>>> from pyscf import lib
>>> print(lib.num_threads())
8
>>> lib.num_threads(4)
4
>>> print(lib.num_threads())
4
'''
from pyscf.lib.numpy_helper import _np_helper
if n is not None:
_np_helper.set_omp_threads.restype = ctypes.c_int
threads = _np_helper.set_omp_threads(ctypes.c_int(int(n)))
if threads == 0:
warnings.warn('OpenMP is not available. '
'Setting omp_threads to %s has no effects.' % n)
return threads
else:
_np_helper.get_omp_threads.restype = ctypes.c_int
return _np_helper.get_omp_threads()
class with_omp_threads(object):
    '''Use this context manager to create a temporary context in which the
    number of OpenMP threads is set to the required value. When the program
    exits the context, the number of OpenMP threads is restored.
Args:
nthreads : int
Examples:
>>> from pyscf import lib
>>> print(lib.num_threads())
8
>>> with lib.with_omp_threads(2):
... print(lib.num_threads())
2
>>> print(lib.num_threads())
8
'''
def __init__(self, nthreads=None):
self.nthreads = nthreads
self.sys_threads = None
def __enter__(self):
if self.nthreads is not None and self.nthreads >= 1:
self.sys_threads = num_threads()
num_threads(self.nthreads)
return self
def __exit__(self, type, value, traceback):
if self.sys_threads is not None:
num_threads(self.sys_threads)
def c_int_arr(m):
npm = numpy.array(m).flatten('C')
arr = (ctypes.c_int * npm.size)(*npm)
    # Cannot return an LP_c_int pointer here: returning
    # npm.ctypes.data_as(c_int_p) would destroy npm before the pointer is used.
return arr
def f_int_arr(m):
npm = numpy.array(m).flatten('F')
arr = (ctypes.c_int * npm.size)(*npm)
return arr
def c_double_arr(m):
npm = numpy.array(m).flatten('C')
arr = (ctypes.c_double * npm.size)(*npm)
return arr
def f_double_arr(m):
npm = numpy.array(m).flatten('F')
arr = (ctypes.c_double * npm.size)(*npm)
return arr
def member(test, x, lst):
for l in lst:
if test(x, l):
return True
return False
def remove_dup(test, lst, from_end=False):
if test is None:
return set(lst)
else:
if from_end:
lst = list(reversed(lst))
seen = []
for l in lst:
if not member(test, l, seen):
seen.append(l)
return seen
def remove_if(test, lst):
return [x for x in lst if not test(x)]
def find_if(test, lst):
for l in lst:
if test(l):
return l
raise ValueError('No element of the given list matches the test condition.')
def arg_first_match(test, lst):
for i,x in enumerate(lst):
if test(x):
return i
raise ValueError('No element of the given list matches the test condition.')
def _balanced_partition(cum, ntasks):
segsize = float(cum[-1]) / ntasks
bounds = numpy.arange(ntasks+1) * segsize
displs = abs(bounds[:,None] - cum).argmin(axis=1)
return displs
def _blocksize_partition(cum, blocksize):
n = len(cum) - 1
displs = [0]
if n == 0:
return displs
p0 = 0
for i in range(1, n):
if cum[i+1]-cum[p0] > blocksize:
displs.append(i)
p0 = i
displs.append(n)
return displs
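# Illustrative sketch (not part of the original source): both partition
# helpers above take a monotonically increasing cumulative-cost array "cum"
# (length n+1, starting at 0). _balanced_partition returns boundaries that
# split the total cost into ntasks roughly equal pieces, while
# _blocksize_partition grows each segment until adding one more element would
# exceed blocksize.
def _example_partitions():
    costs = numpy.array([3, 1, 4, 1, 5, 9, 2, 6])
    cum = numpy.append(0, numpy.cumsum(costs))
    print(_balanced_partition(cum, 3))    # e.g. [0 4 6 8]
    print(_blocksize_partition(cum, 10))  # e.g. [0, 4, 5, 6, 8]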
def flatten(lst):
'''flatten nested lists
x[0] + x[1] + x[2] + ...
Examples:
>>> flatten([[0, 2], [1], [[9, 8, 7]]])
[0, 2, 1, [9, 8, 7]]
'''
return list(itertools.chain.from_iterable(lst))
def prange(start, end, step):
'''This function splits the number sequence between "start" and "end"
using uniform "step" length. It yields the boundary (start, end) for each
fragment.
Examples:
>>> for p0, p1 in lib.prange(0, 8, 2):
... print(p0, p1)
(0, 2)
(2, 4)
(4, 6)
(6, 8)
'''
if start < end:
for i in range(start, end, step):
yield i, min(i+step, end)
def prange_tril(start, stop, blocksize):
    '''Similar to :func:`prange`, yields start (p0) and end (p1) with the
    restriction p1*(p1+1)/2 - p0*(p0+1)/2 < blocksize
Examples:
>>> for p0, p1 in lib.prange_tril(0, 10, 25):
... print(p0, p1)
(0, 6)
(6, 9)
(9, 10)
'''
if start >= stop:
return []
idx = numpy.arange(start, stop+1)
cum_costs = idx*(idx+1)//2 - start*(start+1)//2
displs = [x+start for x in _blocksize_partition(cum_costs, blocksize)]
return zip(displs[:-1], displs[1:])
def map_with_prefetch(func, *iterables):
    '''
    Apply the function to each task while prefetching the next task in the
    background.
    '''
global_import_lock = False
if sys.version_info < (3, 6):
import imp
global_import_lock = imp.lock_held()
if not ASYNC_IO or global_import_lock:
for task in zip(*iterables):
yield func(*task)
elif ThreadPoolExecutor is not None:
with ThreadPoolExecutor(max_workers=1) as executor:
future = None
for task in zip(*iterables):
if future is None:
future = executor.submit(func, *task)
else:
result = future.result()
future = executor.submit(func, *task)
yield result
if future is not None:
yield future.result()
else:
def func_with_buf(_output_buf, *args):
_output_buf[0] = func(*args)
with call_in_background(func_with_buf) as f_prefetch:
buf0, buf1 = [None], [None]
for istep, task in enumerate(zip(*iterables)):
if istep == 0:
f_prefetch(buf0, *task)
else:
buf0, buf1 = buf1, buf0
f_prefetch(buf0, *task)
yield buf1[0]
if buf0[0] is not None:
yield buf0[0]
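# Illustrative sketch (not part of the original source): map_with_prefetch
# behaves like map(), except that while the caller consumes one result the
# next task is already being evaluated in a background worker. The function
# below is a stand-in for an expensive, I/O-bound per-task step.
def _example_map_with_prefetch():
    def load(i):
        return i * i  # pretend this reads and decodes a chunk from disk
    for result in map_with_prefetch(load, range(5)):
        print(result)  # 0, 1, 4, 9, 16 in order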
def index_tril_to_pair(ij):
'''Given tril-index ij, compute the pair indices (i,j) which satisfy
ij = i * (i+1) / 2 + j
'''
i = (numpy.sqrt(2*ij+.25) - .5 + 1e-7).astype(int)
j = ij - i*(i+1)//2
return i, j
def tril_product(*iterables, **kwds):
'''Cartesian product in lower-triangular form for multiple indices
For a given list of indices (`iterables`), this function yields all
indices such that the sub-indices given by the kwarg `tril_idx` satisfy a
lower-triangular form. The lower-triangular form satisfies:
.. math:: i[tril_idx[0]] >= i[tril_idx[1]] >= ... >= i[tril_idx[len(tril_idx)-1]]
Args:
*iterables: Variable length argument list of indices for the cartesian product
**kwds: Arbitrary keyword arguments. Acceptable keywords include:
repeat (int): Number of times to repeat the iterables
tril_idx (array_like): Indices to put into lower-triangular form.
Yields:
product (tuple): Tuple in lower-triangular form.
Examples:
Specifying no `tril_idx` is equivalent to just a cartesian product.
>>> list(tril_product(range(2), repeat=2))
[(0, 0), (0, 1), (1, 0), (1, 1)]
We can specify only sub-indices to satisfy a lower-triangular form:
>>> list(tril_product(range(2), repeat=3, tril_idx=[1,2]))
[(0, 0, 0), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 1, 0), (1, 1, 1)]
We specify all indices to satisfy a lower-triangular form, useful for iterating over
the symmetry unique elements of occupied/virtual orbitals in a 3-particle operator:
>>> list(tril_product(range(3), repeat=3, tril_idx=[0,1,2]))
[(0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 0, 0), (2, 1, 0), (2, 1, 1), (2, 2, 0), (2, 2, 1), (2, 2, 2)]
'''
repeat = kwds.get('repeat', 1)
tril_idx = kwds.get('tril_idx', [])
niterables = len(iterables) * repeat
ntril_idx = len(tril_idx)
    assert ntril_idx <= niterables, "Can't have a greater number of tril indices than iterables!"
if ntril_idx > 0:
assert numpy.max(tril_idx) < niterables, 'Tril index out of bounds for %d iterables! idx = %s' % \
(niterables, tril_idx)
for tup in itertools.product(*iterables, repeat=repeat):
if ntril_idx == 0:
yield tup
continue
if all([tup[tril_idx[i]] >= tup[tril_idx[i+1]] for i in range(ntril_idx-1)]):
yield tup
def square_mat_in_trilu_indices(n):
'''Return a n x n symmetric index matrix, in which the elements are the
indices of the unique elements of a tril vector
[0 1 3 ... ]
[1 2 4 ... ]
[3 4 5 ... ]
[... ]
'''
idx = numpy.tril_indices(n)
tril2sq = numpy.zeros((n,n), dtype=int)
tril2sq[idx[0],idx[1]] = tril2sq[idx[1],idx[0]] = numpy.arange(n*(n+1)//2)
return tril2sq
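# Illustrative sketch (not part of the original source): index_tril_to_pair
# maps a flat lower-triangular index back to its (row, col) pair, and
# square_mat_in_trilu_indices embeds the flat indices into a symmetric n x n
# matrix, so the two functions round-trip.
def _example_tril_indexing():
    n = 3
    ij = numpy.arange(n * (n + 1) // 2)  # flat tril indices 0..5
    i, j = index_tril_to_pair(ij)
    # [(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]
    print(list(zip(i, j)))
    tril2sq = square_mat_in_trilu_indices(n)
    assert (tril2sq[i, j] == ij).all()  # the flat index is recovered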
class capture_stdout(object):
'''redirect all stdout (c printf & python print) into a string
Examples:
>>> import os
>>> from pyscf import lib
>>> with lib.capture_stdout() as out:
... os.system('ls')
>>> print(out.read())
'''
#TODO: handle stderr
def __enter__(self):
sys.stdout.flush()
self._contents = None
self.old_stdout_fileno = sys.stdout.fileno()
self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
self.ftmp = tempfile.NamedTemporaryFile(dir=param.TMPDIR)
os.dup2(self.ftmp.file.fileno(), self.old_stdout_fileno)
return self
def __exit__(self, type, value, traceback):
sys.stdout.flush()
self.ftmp.file.seek(0)
self._contents = self.ftmp.file.read()
self.ftmp.close()
os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
os.close(self.bak_stdout_fd)
def read(self):
if self._contents:
return self._contents
else:
sys.stdout.flush()
self.ftmp.file.seek(0)
return self.ftmp.file.read()
ctypes_stdout = capture_stdout
class quite_run(object):
'''capture all stdout (c printf & python print) but output nothing
Examples:
>>> import os
>>> from pyscf import lib
>>> with lib.quite_run():
... os.system('ls')
'''
def __enter__(self):
sys.stdout.flush()
#TODO: to handle the redirected stdout e.g. StringIO()
self.old_stdout_fileno = sys.stdout.fileno()
self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
self.fnull = open(os.devnull, 'wb')
os.dup2(self.fnull.fileno(), self.old_stdout_fileno)
def __exit__(self, type, value, traceback):
sys.stdout.flush()
os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
self.fnull.close()
# from pygeocoder
# this decorator lets me use methods as both static and instance methods
# In contrast to classmethod, when obj.function() is called, the first
# argument is obj in omnimethod rather than obj.__class__ in classmethod
class omnimethod(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
return functools.partial(self.func, instance)
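# Illustrative sketch (not part of the original source): with omnimethod the
# wrapped function always receives the instance as its first argument, or
# None when accessed through the class (unlike classmethod, which would pass
# the class itself).
def _example_omnimethod():
    class Greeter(object):
        @omnimethod
        def who(self):
            return 'instance' if self is not None else 'class'
    print(Greeter().who())  # 'instance'
    print(Greeter.who())    # 'class': accessed on the class, self is None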
SANITY_CHECK = getattr(__config__, 'SANITY_CHECK', True)
class StreamObject(object):
'''For most methods, there are three stream functions to pipe computing stream:
1 ``.set_`` function to update object attributes, eg
``mf = scf.RHF(mol).set(conv_tol=1e-5)`` is identical to proceed in two steps
``mf = scf.RHF(mol); mf.conv_tol=1e-5``
    2 ``.run`` function to execute the kernel function (the function arguments
    are passed to the kernel function). If keyword arguments are given, it will
    first call the ``.set`` function to update object attributes, then execute
    the kernel function. Eg
``mf = scf.RHF(mol).run(dm_init, conv_tol=1e-5)`` is identical to three steps
``mf = scf.RHF(mol); mf.conv_tol=1e-5; mf.kernel(dm_init)``
3 ``.apply`` function to apply the given function/class to the current object
(function arguments and keyword arguments are passed to the given function).
Eg
``mol.apply(scf.RHF).run().apply(mcscf.CASSCF, 6, 4, frozen=4)`` is identical to
``mf = scf.RHF(mol); mf.kernel(); mcscf.CASSCF(mf, 6, 4, frozen=4)``
'''
verbose = 0
stdout = sys.stdout
_keys = set(['verbose', 'stdout'])
def kernel(self, *args, **kwargs):
'''
Kernel function is the main driver of a method. Every method should
define the kernel function as the entry of the calculation. Note the
return value of kernel function is not strictly defined. It can be
anything related to the method (such as the energy, the wave-function,
the DFT mesh grids etc.).
'''
pass
def pre_kernel(self, envs):
'''
A hook to be run before the main body of kernel function is executed.
Internal variables are exposed to pre_kernel through the "envs"
dictionary. Return value of pre_kernel function is not required.
'''
pass
def post_kernel(self, envs):
'''
A hook to be run after the main body of the kernel function. Internal
variables are exposed to post_kernel through the "envs" dictionary.
Return value of post_kernel function is not required.
'''
pass
def run(self, *args, **kwargs):
'''
Call the kernel function of current object. `args` will be passed
to kernel function. `kwargs` will be used to update the attributes of
current object. The return value of method run is the object itself.
This allows a series of functions/methods to be executed in pipe.
'''
self.set(**kwargs)
self.kernel(*args)
return self
def set(self, *args, **kwargs):
'''
Update the attributes of the current object. The return value of
method set is the object itself. This allows a series of
functions/methods to be executed in pipe.
'''
if args:
warnings.warn('method set() only supports keyword arguments.\n'
'Arguments %s are ignored.' % args)
#if getattr(self, '_keys', None):
# for k,v in kwargs.items():
# setattr(self, k, v)
# if k not in self._keys:
# sys.stderr.write('Warning: %s does not have attribute %s\n'
# % (self.__class__, k))
#else:
for k,v in kwargs.items():
setattr(self, k, v)
return self
# An alias to .set method
__call__ = set
def apply(self, fn, *args, **kwargs):
        '''
        Apply fn to the current object and the remaining arguments:
        return fn(self, *args, **kwargs). This allows a series of
        functions/methods to be executed in pipe.
        '''
return fn(self, *args, **kwargs)
# def _format_args(self, args, kwargs, kernel_kw_lst):
# args1 = [kwargs.pop(k, v) for k, v in kernel_kw_lst]
# return args + args1[len(args):], kwargs
def check_sanity(self):
        '''
        Check the sanity of the class/object attributes and warn if an
        attribute may be misspelled or may overwrite a class method. It does
        not check attributes which are prefixed with "_". The return value of
        check_sanity is the object itself, which allows a series of
        functions/methods to be executed in pipe.
        '''
if (SANITY_CHECK and
self.verbose > 0 and # logger.QUIET
getattr(self, '_keys', None)):
check_sanity(self, self._keys, self.stdout)
return self
def view(self, cls):
'''New view of object with the same attributes.'''
obj = cls.__new__(cls)
obj.__dict__.update(self.__dict__)
return obj
def add_keys(self, **kwargs):
'''Add or update attributes of the object and register these attributes in ._keys'''
if kwargs:
self.__dict__.update(**kwargs)
self._keys = self._keys.union(kwargs.keys())
return self
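# Illustrative sketch (not part of the original source): a minimal
# StreamObject subclass showing how .set() and .run() chain into a pipeline.
# The Doubler class and its attributes are hypothetical, purely for
# demonstration.
def _example_stream_object():
    class Doubler(StreamObject):
        factor = 2
        _keys = StreamObject._keys.union(['factor', 'result'])
        def kernel(self, x):
            self.result = x * self.factor
    d = Doubler().set(factor=3).run(7)  # set() updates attrs, run() calls kernel()
    print(d.result)  # 21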
_warn_once_registry = {}
def check_sanity(obj, keysref, stdout=sys.stdout):
'''Check misinput of class attributes, check whether a class method is
overwritten. It does not check the attributes which are prefixed with
"_".
'''
objkeys = [x for x in obj.__dict__ if not x.startswith('_')]
keysub = set(objkeys) - set(keysref)
if keysub:
class_attr = set(dir(obj.__class__))
keyin = keysub.intersection(class_attr)
if keyin:
msg = ('Overwritten attributes %s of %s\n' %
(' '.join(keyin), obj.__class__))
if msg not in _warn_once_registry:
_warn_once_registry[msg] = 1
sys.stderr.write(msg)
if stdout is not sys.stdout:
stdout.write(msg)
keydiff = keysub - class_attr
if keydiff:
msg = ('%s does not have attributes %s\n' %
(obj.__class__, ' '.join(keydiff)))
if msg not in _warn_once_registry:
_warn_once_registry[msg] = 1
sys.stderr.write(msg)
if stdout is not sys.stdout:
stdout.write(msg)
return obj
def with_doc(doc):
    '''Use this decorator to add a docstring to a function:
    @with_doc(doc)
    def fn(...):
...
is equivalent to
fn.__doc__ = doc
'''
def fn_with_doc(fn):
fn.__doc__ = doc
return fn
return fn_with_doc
def alias(fn, alias_name=None):
'''
The statement "fn1 = alias(fn)" in a class is equivalent to define the
following method in the class:
.. code-block:: python
def fn1(self, *args, **kwargs):
return self.fn(*args, **kwargs)
Using alias function instead of fn1 = fn because some methods may be
overloaded in the child class. Using "alias" can make sure that the
overloaded mehods were called when calling the aliased method.
'''
fname = fn.__name__
def aliased_fn(self, *args, **kwargs):
return getattr(self, fname)(*args, **kwargs)
if alias_name is not None:
aliased_fn.__name__ = alias_name
doc_str = 'An alias to method %s\n' % fname
if sys.version_info >= (3,):
from inspect import signature
sig = str(signature(fn))
if alias_name is None:
doc_str += 'Function Signature: %s\n' % sig
else:
doc_str += 'Function Signature: %s%s\n' % (alias_name, sig)
doc_str += '----------------------------------------\n\n'
if fn.__doc__ is not None:
doc_str += fn.__doc__
aliased_fn.__doc__ = doc_str
return aliased_fn
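# Illustrative sketch (not part of the original source): because the aliased
# name dispatches through getattr at call time, a child-class override of the
# original method is honored by the alias as well.
def _example_alias():
    class Base(object):
        def compute(self):
            return 'base'
        run_compute = alias(compute, alias_name='run_compute')
    class Child(Base):
        def compute(self):
            return 'child'
    print(Base().run_compute())   # 'base'
    print(Child().run_compute())  # 'child': the override is picked up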
def class_as_method(cls):
'''
The statement "fn1 = alias(Class)" is equivalent to:
.. code-block:: python
def fn1(self, *args, **kwargs):
return Class(self, *args, **kwargs)
'''
def fn(obj, *args, **kwargs):
return cls(obj, *args, **kwargs)
fn.__doc__ = cls.__doc__
fn.__name__ = cls.__name__
fn.__module__ = cls.__module__
return fn
def overwrite_mro(obj, mro):
'''A hacky function to overwrite the __mro__ attribute'''
class HackMRO(type):
pass
# Overwrite type.mro function so that Temp class can use the given mro
HackMRO.mro = lambda self: mro
#if sys.version_info < (3,):
# class Temp(obj.__class__):
# __metaclass__ = HackMRO
#else:
# class Temp(obj.__class__, metaclass=HackMRO):
# pass
Temp = HackMRO(obj.__class__.__name__, obj.__class__.__bases__, obj.__dict__)
obj = Temp()
# Delete mro function otherwise all subclass of Temp are not able to
# resolve the right mro
del(HackMRO.mro)
return obj
def izip(*args):
'''python2 izip == python3 zip'''
if sys.version_info < (3,):
return itertools.izip(*args)
else:
return zip(*args)
class ProcessWithReturnValue(Process):
def __init__(self, group=None, target=None, name=None, args=(),
kwargs=None):
self._q = Queue()
self._e = None
def qwrap(*args, **kwargs):
try:
self._q.put(target(*args, **kwargs))
except BaseException as e:
self._e = e
raise e
Process.__init__(self, group, qwrap, name, args, kwargs)
def join(self):
Process.join(self)
if self._e is not None:
raise ProcessRuntimeError('Error on process %s:\n%s' % (self, self._e))
else:
return self._q.get()
get = join
class ProcessRuntimeError(RuntimeError):
pass
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None, args=(),
kwargs=None):
self._q = Queue()
self._e = None
def qwrap(*args, **kwargs):
try:
self._q.put(target(*args, **kwargs))
except BaseException as e:
self._e = e
raise e
Thread.__init__(self, group, qwrap, name, args, kwargs)
def join(self):
Thread.join(self)
if self._e is not None:
raise ThreadRuntimeError('Error on thread %s:\n%s' % (self, self._e))
else:
# Note: If the return value of target is huge, Queue.get may raise
# SystemError: NULL result without error in PyObject_Call
# It is because return value is cached somewhere by pickle but pickle is
# unable to handle huge amount of data.
return self._q.get()
get = join
class ThreadWithTraceBack(Thread):
def __init__(self, group=None, target=None, name=None, args=(),
kwargs=None):
self._e = None
def qwrap(*args, **kwargs):
try:
target(*args, **kwargs)
except BaseException as e:
self._e = e
raise e
Thread.__init__(self, group, qwrap, name, args, kwargs)
def join(self):
Thread.join(self)
if self._e is not None:
raise ThreadRuntimeError('Error on thread %s:\n%s' % (self, self._e))
class ThreadRuntimeError(RuntimeError):
pass
def background_thread(func, *args, **kwargs):
    '''Apply the function in a background thread.'''
thread = ThreadWithReturnValue(target=func, args=args, kwargs=kwargs)
thread.start()
return thread
def background_process(func, *args, **kwargs):
    '''Apply the function in a background process.'''
thread = ProcessWithReturnValue(target=func, args=args, kwargs=kwargs)
thread.start()
return thread
bg = background = bg_thread = background_thread
bp = bg_process = background_process
ASYNC_IO = getattr(__config__, 'ASYNC_IO', True)
class call_in_background(object):
    '''Within this macro, function(s) can be executed asynchronously (the
    given functions are executed in the background).
    Attributes:
        sync (bool): Whether to run in synchronized mode. The default value
            is False (asynchronous mode).
Examples:
>>> with call_in_background(fun) as async_fun:
... async_fun(a, b) # == fun(a, b)
... do_something_else()
>>> with call_in_background(fun1, fun2) as (afun1, afun2):
... afun2(a, b)
... do_something_else()
... afun2(a, b)
... do_something_else()
... afun1(a, b)
... do_something_else()
'''
def __init__(self, *fns, **kwargs):
self.fns = fns
self.executor = None
self.handlers = [None] * len(self.fns)
self.sync = kwargs.get('sync', not ASYNC_IO)
if h5py.version.version[:4] == '2.2.': # h5py-2.2.* has bug in threading mode
# Disable back-ground mode
def __enter__(self):
if len(self.fns) == 1:
return self.fns[0]
else:
return self.fns
else:
def __enter__(self):
fns = self.fns
handlers = self.handlers
ntasks = len(self.fns)
global_import_lock = False
if sys.version_info < (3, 6):
import imp
global_import_lock = imp.lock_held()
if self.sync or global_import_lock:
                # Some modules like nosetests, coverage etc
                #   python -m unittest test_xxx.py  or  nosetests test_xxx.py
                # hang when Python multi-threading is used in the import stage,
                # due to a bug (the Python import lock) in the threading module.
                # See also
                # https://github.com/paramiko/paramiko/issues/104
                # https://docs.python.org/2/library/threading.html#importing-in-threaded-code
                # Disable the asynchronous mode for safe importing
def def_async_fn(i):
return fns[i]
elif ThreadPoolExecutor is None: # async mode, old python
def def_async_fn(i):
def async_fn(*args, **kwargs):
if self.handlers[i] is not None:
self.handlers[i].join()
self.handlers[i] = ThreadWithTraceBack(target=fns[i], args=args,
kwargs=kwargs)
self.handlers[i].start()
return self.handlers[i]
return async_fn
else: # multiple executors in async mode, python 2.7.12 or newer
executor = self.executor = ThreadPoolExecutor(max_workers=ntasks)
def def_async_fn(i):
def async_fn(*args, **kwargs):
if handlers[i] is not None:
try:
handlers[i].result()
except Exception as e:
raise ThreadRuntimeError('Error on thread %s:\n%s'
% (self, e))
handlers[i] = executor.submit(fns[i], *args, **kwargs)
return handlers[i]
return async_fn
if len(self.fns) == 1:
return def_async_fn(0)
else:
return [def_async_fn(i) for i in range(ntasks)]
def __exit__(self, type, value, traceback):
for handler in self.handlers:
if handler is not None:
try:
if ThreadPoolExecutor is None:
handler.join()
else:
handler.result()
except Exception as e:
raise ThreadRuntimeError('Error on thread %s:\n%s' % (self, e))
if self.executor is not None:
self.executor.shutdown(wait=True)
class H5TmpFile(h5py.File):
'''Create and return an HDF5 temporary file.
Kwargs:
        filename : str or None
            If a string is given, an HDF5 file with the given filename will be
            created; this file persists even after the H5TmpFile object is
            released. If nothing is specified, a temporary HDF5 file is
            created and deleted when the H5TmpFile object is released.
The return object is an h5py.File object. The file will be automatically
deleted when it is closed or the object is released (unless filename is
specified).
Examples:
>>> from pyscf import lib
>>> ftmp = lib.H5TmpFile()
'''
def __init__(self, filename=None, mode='a', *args, **kwargs):
if filename is None:
tmpfile = tempfile.NamedTemporaryFile(dir=param.TMPDIR)
filename = tmpfile.name
h5py.File.__init__(self, filename, mode, *args, **kwargs)
#FIXME: Does GC flush/close the HDF5 file when releasing the resource?
# To make HDF5 file reusable, file has to be closed or flushed
def __del__(self):
try:
self.close()
except AttributeError: # close not defined in old h5py
pass
except ValueError: # if close() is called twice
pass
except ImportError: # exit program before de-referring the object
pass
def fingerprint(a):
'''Fingerprint of numpy array'''
a = numpy.asarray(a)
return numpy.dot(numpy.cos(numpy.arange(a.size)), a.ravel())
finger = fp = fingerprint
def ndpointer(*args, **kwargs):
base = numpy.ctypeslib.ndpointer(*args, **kwargs)
@classmethod
def from_param(cls, obj):
if obj is None:
return obj
return base.from_param(obj)
return type(base.__name__, (base,), {'from_param': from_param})
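# Illustrative sketch (not part of the original source): unlike a plain
# numpy.ctypeslib.ndpointer, the wrapper above lets None pass through as a
# NULL pointer, so optional array arguments can be declared in a C function's
# argtypes. The commented-out binding is hypothetical.
def _example_ndpointer():
    arr_t = ndpointer(dtype=numpy.float64, ndim=1, flags='C_CONTIGUOUS')
    # libfoo.some_kernel.argtypes = [arr_t, arr_t]  # hypothetical C binding
    assert arr_t.from_param(None) is None   # None is accepted as NULL
    arr_t.from_param(numpy.zeros(3))        # a real array still validates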
# A tag to label the derived Scanner class
class SinglePointScanner: pass
class GradScanner:
def __init__(self, g):
self.__dict__.update(g.__dict__)
self.base = g.base.as_scanner()
@property
def e_tot(self):
return self.base.e_tot
@e_tot.setter
def e_tot(self, x):
self.base.e_tot = x
@property
def converged(self):
        # Some base methods like MP2 do not have the attribute converged
conv = getattr(self.base, 'converged', True)
return conv
class temporary_env(object):
'''Within the context of this macro, the attributes of the object are
temporarily updated. When the program goes out of the scope of the
context, the original value of each attribute will be restored.
Examples:
>>> with temporary_env(lib.param, LIGHT_SPEED=15., BOHR=2.5):
... print(lib.param.LIGHT_SPEED, lib.param.BOHR)
15. 2.5
>>> print(lib.param.LIGHT_SPEED, lib.param.BOHR)
137.03599967994 0.52917721092
'''
def __init__(self, obj, **kwargs):
self.obj = obj
        # Should I skip the keys which are not present in obj?
#keys = [key for key in kwargs.keys() if hasattr(obj, key)]
#self.env_bak = [(key, getattr(obj, key, 'TO_DEL')) for key in keys]
#self.env_new = [(key, kwargs[key]) for key in keys]
self.env_bak = [(key, getattr(obj, key, 'TO_DEL')) for key in kwargs]
self.env_new = [(key, kwargs[key]) for key in kwargs]
def __enter__(self):
for k, v in self.env_new:
setattr(self.obj, k, v)
return self
def __exit__(self, type, value, traceback):
for k, v in self.env_bak:
if isinstance(v, str) and v == 'TO_DEL':
delattr(self.obj, k)
else:
setattr(self.obj, k, v)
class light_speed(temporary_env):
    '''Within the context of this macro, the environment variable LIGHT_SPEED
    can be customized.
Examples:
>>> with light_speed(15.):
... print(lib.param.LIGHT_SPEED)
15.
>>> print(lib.param.LIGHT_SPEED)
137.03599967994
'''
def __init__(self, c):
temporary_env.__init__(self, param, LIGHT_SPEED=c)
self.c = c
def __enter__(self):
temporary_env.__enter__(self)
return self.c
def repo_info(repo_path):
'''
Repo location, version, git branch and commit ID
'''
def git_version(orig_head, head, branch):
git_version = []
if orig_head:
git_version.append('GIT ORIG_HEAD %s' % orig_head)
if branch:
git_version.append('GIT HEAD (branch %s) %s' % (branch, head))
elif head:
git_version.append('GIT HEAD %s' % head)
return '\n'.join(git_version)
repo_path = os.path.abspath(repo_path)
if os.path.isdir(os.path.join(repo_path, '.git')):
git_str = git_version(*git_info(repo_path))
elif os.path.isdir(os.path.abspath(os.path.join(repo_path, '..', '.git'))):
repo_path = os.path.abspath(os.path.join(repo_path, '..'))
git_str = git_version(*git_info(repo_path))
else:
git_str = None
# TODO: Add info of BLAS, libcint, libxc, libxcfun, tblis if applicable
info = {'path': repo_path}
if git_str:
info['git'] = git_str
return info
def git_info(repo_path):
orig_head = None
head = None
branch = None
try:
with open(os.path.join(repo_path, '.git', 'ORIG_HEAD'), 'r') as f:
orig_head = f.read().strip()
except IOError:
pass
try:
head = os.path.join(repo_path, '.git', 'HEAD')
with open(head, 'r') as f:
head = f.read().splitlines()[0].strip()
if head.startswith('ref:'):
branch = os.path.basename(head)
with open(os.path.join(repo_path, '.git', head.split(' ')[1]), 'r') as f:
head = f.read().strip()
except IOError:
pass
return orig_head, head, branch
def isinteger(obj):
'''
Check if an object is an integer.
'''
# A bool is also an int in python, but we don't want that.
# On the other hand, numpy.bool_ is probably not a numpy.integer, but just to be sure...
if isinstance(obj, (bool, numpy.bool_)):
return False
# These are actual ints we expect to encounter.
else:
return isinstance(obj, (int, numpy.integer))
def issequence(obj):
'''
Determine if the object provided is a sequence.
'''
# These are the types of sequences that we permit.
# numpy.ndarray is not a subclass of collections.abc.Sequence as of version 1.19.
sequence_types = (collections.abc.Sequence, numpy.ndarray)
return isinstance(obj, sequence_types)
def isintsequence(obj):
'''
Determine if the object provided is a sequence of integers.
'''
if not issequence(obj):
return False
elif isinstance(obj, numpy.ndarray):
return issubclass(obj.dtype.type, numpy.integer)
else:
        return all(isinteger(i) for i in obj)
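# Illustrative sketch (not part of the original source) of the three type
# predicates above, including the deliberate rejection of bools.
def _example_type_predicates():
    assert isinteger(3) and isinteger(numpy.int64(3))
    assert not isinteger(True)                   # bools are excluded on purpose
    assert issequence([1, 2]) and issequence(numpy.arange(3))
    assert isintsequence(numpy.arange(3))
    assert not isintsequence(numpy.arange(3.0))  # float dtype fails the check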
if __name__ == '__main__':
for i,j in prange_tril(0, 90, 300):
print(i, j, j*(j+1)//2-i*(i+1)//2)
|
sunqm/pyscf
|
pyscf/lib/misc.py
|
Python
|
apache-2.0
| 37,333
|
[
"PySCF"
] |
b98dea30737ac788d1d0f76068c4dbc812d100b8f0ecae47541ddc0f10aa458e
|
from .unit_definitions import (
percent, percents,
permille,
rad, radian, radians,
deg, degree, degrees,
sr, steradian, steradians,
mil, angular_mil, angular_mils,
m, meter, meters,
kg, kilogram, kilograms,
s, second, seconds,
A, ampere, amperes,
K, kelvin, kelvins,
mol, mole, moles,
cd, candela, candelas,
g, gram, grams,
mg, milligram, milligrams,
ug, microgram, micrograms,
newton, newtons, N,
joule, joules, J,
watt, watts, W,
pascal, pascals, Pa, pa,
hertz, hz, Hz,
coulomb, coulombs, C,
volt, volts, v, V,
ohm, ohms,
siemens, S, mho, mhos,
farad, farads, F,
henry, henrys, H,
tesla, teslas, T,
weber, webers, Wb, wb,
optical_power, dioptre, D,
lux, lx,
katal, kat,
gray, Gy,
becquerel, Bq,
km, kilometer, kilometers,
dm, decimeter, decimeters,
cm, centimeter, centimeters,
mm, millimeter, millimeters,
um, micrometer, micrometers, micron, microns,
nm, nanometer, nanometers,
pm, picometer, picometers,
ft, foot, feet,
inch, inches,
yd, yard, yards,
mi, mile, miles,
nmi, nautical_mile, nautical_miles,
l, liter, liters,
dl, deciliter, deciliters,
cl, centiliter, centiliters,
ml, milliliter, milliliters,
ms, millisecond, milliseconds,
us, microsecond, microseconds,
ns, nanosecond, nanoseconds,
ps, picosecond, picoseconds,
minute, minutes,
h, hour, hours,
day, days,
anomalistic_year, anomalistic_years,
sidereal_year, sidereal_years,
tropical_year, tropical_years,
common_year, common_years,
julian_year, julian_years,
draconic_year, draconic_years,
gaussian_year, gaussian_years,
full_moon_cycle, full_moon_cycles,
year, years,
G, gravitational_constant,
c, speed_of_light,
elementary_charge,
hbar,
planck,
eV, electronvolt, electronvolts,
avogadro_number,
avogadro, avogadro_constant,
boltzmann, boltzmann_constant,
stefan, stefan_boltzmann_constant,
R, molar_gas_constant,
faraday_constant,
josephson_constant,
von_klitzing_constant,
amu, amus, atomic_mass_unit, atomic_mass_constant,
gee, gees, acceleration_due_to_gravity,
u0, magnetic_constant, vacuum_permeability,
e0, electric_constant, vacuum_permittivity,
Z0, vacuum_impedance,
coulomb_constant, coulombs_constant, electric_force_constant,
atmosphere, atmospheres, atm,
kPa, kilopascal,
bar, bars,
pound, pounds,
psi,
dHg0,
mmHg, torr,
mmu, mmus, milli_mass_unit,
quart, quarts,
ly, lightyear, lightyears,
au, astronomical_unit, astronomical_units,
planck_mass,
planck_time,
planck_temperature,
planck_length,
planck_charge,
planck_area,
planck_volume,
planck_momentum,
planck_energy,
planck_force,
planck_power,
planck_density,
planck_energy_density,
planck_intensity,
planck_angular_frequency,
planck_pressure,
planck_current,
planck_voltage,
planck_impedance,
planck_acceleration,
bit, bits,
byte,
kibibyte, kibibytes,
mebibyte, mebibytes,
gibibyte, gibibytes,
tebibyte, tebibytes,
pebibyte, pebibytes,
exbibyte, exbibytes,
curie, rutherford
)
__all__ = [
'percent', 'percents',
'permille',
'rad', 'radian', 'radians',
'deg', 'degree', 'degrees',
'sr', 'steradian', 'steradians',
'mil', 'angular_mil', 'angular_mils',
'm', 'meter', 'meters',
'kg', 'kilogram', 'kilograms',
's', 'second', 'seconds',
'A', 'ampere', 'amperes',
'K', 'kelvin', 'kelvins',
'mol', 'mole', 'moles',
'cd', 'candela', 'candelas',
'g', 'gram', 'grams',
'mg', 'milligram', 'milligrams',
'ug', 'microgram', 'micrograms',
'newton', 'newtons', 'N',
'joule', 'joules', 'J',
'watt', 'watts', 'W',
'pascal', 'pascals', 'Pa', 'pa',
'hertz', 'hz', 'Hz',
'coulomb', 'coulombs', 'C',
'volt', 'volts', 'v', 'V',
'ohm', 'ohms',
'siemens', 'S', 'mho', 'mhos',
'farad', 'farads', 'F',
'henry', 'henrys', 'H',
'tesla', 'teslas', 'T',
'weber', 'webers', 'Wb', 'wb',
'optical_power', 'dioptre', 'D',
'lux', 'lx',
'katal', 'kat',
'gray', 'Gy',
'becquerel', 'Bq',
'km', 'kilometer', 'kilometers',
'dm', 'decimeter', 'decimeters',
'cm', 'centimeter', 'centimeters',
'mm', 'millimeter', 'millimeters',
'um', 'micrometer', 'micrometers', 'micron', 'microns',
'nm', 'nanometer', 'nanometers',
'pm', 'picometer', 'picometers',
'ft', 'foot', 'feet',
'inch', 'inches',
'yd', 'yard', 'yards',
'mi', 'mile', 'miles',
'nmi', 'nautical_mile', 'nautical_miles',
'l', 'liter', 'liters',
'dl', 'deciliter', 'deciliters',
'cl', 'centiliter', 'centiliters',
'ml', 'milliliter', 'milliliters',
'ms', 'millisecond', 'milliseconds',
'us', 'microsecond', 'microseconds',
'ns', 'nanosecond', 'nanoseconds',
'ps', 'picosecond', 'picoseconds',
'minute', 'minutes',
'h', 'hour', 'hours',
'day', 'days',
'anomalistic_year', 'anomalistic_years',
'sidereal_year', 'sidereal_years',
'tropical_year', 'tropical_years',
'common_year', 'common_years',
'julian_year', 'julian_years',
'draconic_year', 'draconic_years',
'gaussian_year', 'gaussian_years',
'full_moon_cycle', 'full_moon_cycles',
'year', 'years',
'G', 'gravitational_constant',
'c', 'speed_of_light',
'elementary_charge',
'hbar',
'planck',
'eV', 'electronvolt', 'electronvolts',
'avogadro_number',
'avogadro', 'avogadro_constant',
'boltzmann', 'boltzmann_constant',
'stefan', 'stefan_boltzmann_constant',
'R', 'molar_gas_constant',
'faraday_constant',
'josephson_constant',
'von_klitzing_constant',
'amu', 'amus', 'atomic_mass_unit', 'atomic_mass_constant',
'gee', 'gees', 'acceleration_due_to_gravity',
'u0', 'magnetic_constant', 'vacuum_permeability',
'e0', 'electric_constant', 'vacuum_permittivity',
'Z0', 'vacuum_impedance',
'coulomb_constant', 'coulombs_constant', 'electric_force_constant',
'atmosphere', 'atmospheres', 'atm',
'kPa', 'kilopascal',
'bar', 'bars',
'pound', 'pounds',
'psi',
'dHg0',
'mmHg', 'torr',
'mmu', 'mmus', 'milli_mass_unit',
'quart', 'quarts',
'ly', 'lightyear', 'lightyears',
'au', 'astronomical_unit', 'astronomical_units',
'planck_mass',
'planck_time',
'planck_temperature',
'planck_length',
'planck_charge',
'planck_area',
'planck_volume',
'planck_momentum',
'planck_energy',
'planck_force',
'planck_power',
'planck_density',
'planck_energy_density',
'planck_intensity',
'planck_angular_frequency',
'planck_pressure',
'planck_current',
'planck_voltage',
'planck_impedance',
'planck_acceleration',
'bit', 'bits',
'byte',
'kibibyte', 'kibibytes',
'mebibyte', 'mebibytes',
'gibibyte', 'gibibytes',
'tebibyte', 'tebibytes',
'pebibyte', 'pebibytes',
'exbibyte', 'exbibytes',
'curie', 'rutherford',
]
|
kaushik94/sympy
|
sympy/physics/units/definitions/__init__.py
|
Python
|
bsd-3-clause
| 7,194
|
[
"Avogadro"
] |
9464395281db87a76236d021418c8c95618dbc0923654cf9492984f870f74a94
|
from json import dumps
from typing import Optional
from pytest import raises
from graphql.error import GraphQLSyntaxError
from graphql.language import Lexer, Source, TokenKind, parse
from graphql.utilities import strip_ignored_characters
from ..fixtures import kitchen_sink_query, kitchen_sink_sdl # noqa: F401
from ..utils import dedent
ignored_tokens = [
# UnicodeBOM
"\uFEFF", # Byte Order Mark (U+FEFF)
# WhiteSpace
"\t", # Horizontal Tab (U+0009)
" ", # Space (U+0020)
# LineTerminator
"\n", # "New Line (U+000A)"
"\r", # "Carriage Return (U+000D)" [ lookahead ! "New Line (U+000A)" ]
"\r\n", # "Carriage Return (U+000D)" "New Line (U+000A)"
# Comment
'# "Comment" string\n', # `#` CommentChar*
# Comma
",", # ,
]
punctuator_tokens = ["!", "$", "(", ")", "...", ":", "=", "@", "[", "]", "{", "|", "}"]
non_punctuator_tokens = [
"name_token", # Name
"1", # IntValue
"3.14", # FloatValue
'"some string value"', # StringValue
'"""block\nstring\nvalue"""', # StringValue(BlockString)
]
def lex_value(s: str) -> Optional[str]:
lexer = Lexer(Source(s))
value = lexer.advance().value
assert lexer.advance().kind == TokenKind.EOF, "Expected EOF"
return value
class ExpectStripped:
def __init__(self, doc_string: str):
self.doc_string = doc_string
def to_equal(self, expected: str):
doc_string = self.doc_string
stripped = strip_ignored_characters(doc_string)
assert stripped == expected, dedent(
f"""
Expected strip_ignored_characters({doc_string!r})
to equal {expected!r}
but got {stripped!r}
"""
)
stripped_twice = strip_ignored_characters(stripped)
assert stripped == stripped_twice, dedent(
f""""
Expected strip_ignored_characters({stripped!r})"
to equal {stripped!r}
but got {stripped_twice!r}
"""
)
def to_stay_the_same(self):
self.to_equal(self.doc_string)
def describe_strip_ignored_characters():
def strips_ignored_characters_from_graphql_query_document():
query = dedent(
"""
query SomeQuery($foo: String!, $bar: String) {
someField(foo: $foo, bar: $bar) {
a
b {
c
d
}
}
}
"""
)
assert strip_ignored_characters(query) == (
"query SomeQuery($foo:String!$bar:String)"
"{someField(foo:$foo bar:$bar){a b{c d}}}"
)
def strips_ignored_characters_from_graphql_sdl_document():
sdl = dedent(
'''
"""
Type description
"""
type Foo {
"""
Field description
"""
bar: String
}
'''
)
assert strip_ignored_characters(sdl) == (
'"""Type description""" type Foo{"""Field description""" bar:String}'
)
def report_document_with_invalid_token():
with raises(GraphQLSyntaxError) as exc_info:
strip_ignored_characters('{ foo(arg: "\n"')
assert str(exc_info.value) == dedent(
"""
Syntax Error: Unterminated string.
GraphQL request:1:13
1 | { foo(arg: "
| ^
2 | "
"""
)
def strips_non_parsable_document():
ExpectStripped('{ foo(arg: "str"').to_equal('{foo(arg:"str"')
def strips_documents_with_only_ignored_characters():
ExpectStripped("\n").to_equal("")
ExpectStripped(",").to_equal("")
ExpectStripped(",,").to_equal("")
ExpectStripped("#comment\n, \n").to_equal("")
for ignored in ignored_tokens:
ExpectStripped(ignored).to_equal("")
for another_ignored in ignored_tokens:
ExpectStripped(ignored + another_ignored).to_equal("")
ExpectStripped("".join(ignored_tokens)).to_equal("")
def strips_leading_and_trailing_ignored_tokens():
ExpectStripped("\n1").to_equal("1")
ExpectStripped(",1").to_equal("1")
ExpectStripped(",,1").to_equal("1")
ExpectStripped("#comment\n, \n1").to_equal("1")
ExpectStripped("1\n").to_equal("1")
ExpectStripped("1,").to_equal("1")
ExpectStripped("1,,").to_equal("1")
ExpectStripped("1#comment\n, \n").to_equal("1")
for token in punctuator_tokens + non_punctuator_tokens:
for ignored in ignored_tokens:
ExpectStripped(ignored + token).to_equal(token)
ExpectStripped(token + ignored).to_equal(token)
for another_ignored in ignored_tokens:
ExpectStripped(token + ignored + ignored).to_equal(token)
ExpectStripped(ignored + another_ignored + token).to_equal(token)
ExpectStripped("".join(ignored_tokens) + token).to_equal(token)
ExpectStripped(token + "".join(ignored_tokens)).to_equal(token)
def strips_ignored_tokens_between_punctuator_tokens():
ExpectStripped("[,)").to_equal("[)")
ExpectStripped("[\r)").to_equal("[)")
ExpectStripped("[\r\r)").to_equal("[)")
ExpectStripped("[\r,)").to_equal("[)")
ExpectStripped("[,\n)").to_equal("[)")
for left in punctuator_tokens:
for right in punctuator_tokens:
for ignored in ignored_tokens:
ExpectStripped(left + ignored + right).to_equal(left + right)
for another_ignored in ignored_tokens:
ExpectStripped(
left + ignored + another_ignored + right
).to_equal(left + right)
ExpectStripped(left + "".join(ignored_tokens) + right).to_equal(
left + right
)
def strips_ignored_tokens_between_punctuator_and_non_punctuator_tokens():
ExpectStripped("[,1").to_equal("[1")
ExpectStripped("[\r1").to_equal("[1")
ExpectStripped("[\r\r1").to_equal("[1")
ExpectStripped("[\r,1").to_equal("[1")
ExpectStripped("[,\n1").to_equal("[1")
for non_punctuator in non_punctuator_tokens:
for punctuator in punctuator_tokens:
for ignored in ignored_tokens:
ExpectStripped(punctuator + ignored + non_punctuator).to_equal(
punctuator + non_punctuator
)
for another_ignored in ignored_tokens:
ExpectStripped(
punctuator + ignored + another_ignored + non_punctuator
).to_equal(punctuator + non_punctuator)
ExpectStripped(
punctuator + "".join(ignored_tokens) + non_punctuator
).to_equal(punctuator + non_punctuator)
def strips_ignored_tokens_between_non_punctuator_and_punctuator_tokens():
ExpectStripped("1,[").to_equal("1[")
ExpectStripped("1\r[").to_equal("1[")
ExpectStripped("1\r\r[").to_equal("1[")
ExpectStripped("1\r,[").to_equal("1[")
ExpectStripped("1,\n[").to_equal("1[")
for non_punctuator in non_punctuator_tokens:
for punctuator in punctuator_tokens:
            # The special case for "..." is handled in the test below
if punctuator == "...":
continue
for ignored in ignored_tokens:
ExpectStripped(non_punctuator + ignored + punctuator).to_equal(
non_punctuator + punctuator
)
for another_ignored in ignored_tokens:
ExpectStripped(
non_punctuator + ignored + another_ignored + punctuator
).to_equal(non_punctuator + punctuator)
ExpectStripped(
non_punctuator + "".join(ignored_tokens) + punctuator
).to_equal(non_punctuator + punctuator)
def replace_ignored_tokens_between_non_punctuator_tokens_and_spread_with_space():
ExpectStripped("a ...").to_equal("a ...")
ExpectStripped("1 ...").to_equal("1 ...")
ExpectStripped("1 ... ...").to_equal("1 ......")
for non_punctuator in non_punctuator_tokens:
for ignored in ignored_tokens:
ExpectStripped(non_punctuator + ignored + "...").to_equal(
non_punctuator + " ..."
)
for another_ignored in ignored_tokens:
ExpectStripped(
non_punctuator + ignored + another_ignored + " ..."
).to_equal(non_punctuator + " ...")
ExpectStripped(non_punctuator + "".join(ignored_tokens) + "...").to_equal(
non_punctuator + " ..."
)
def replace_ignored_tokens_between_non_punctuator_tokens_with_space():
ExpectStripped("1 2").to_stay_the_same()
ExpectStripped('"" ""').to_stay_the_same()
ExpectStripped("a b").to_stay_the_same()
ExpectStripped("a,1").to_equal("a 1")
ExpectStripped("a,,1").to_equal("a 1")
ExpectStripped("a 1").to_equal("a 1")
ExpectStripped("a \t 1").to_equal("a 1")
for left in non_punctuator_tokens:
for right in non_punctuator_tokens:
for ignored in ignored_tokens:
ExpectStripped(left + ignored + right).to_equal(left + " " + right)
for another_ignored in ignored_tokens:
ExpectStripped(
left + ignored + another_ignored + right
).to_equal(left + " " + right)
ExpectStripped(left + "".join(ignored_tokens) + right).to_equal(
left + " " + right
)
def does_not_strip_ignored_tokens_embedded_in_the_string():
ExpectStripped('" "').to_stay_the_same()
ExpectStripped('","').to_stay_the_same()
ExpectStripped('",,"').to_stay_the_same()
ExpectStripped('",|"').to_stay_the_same()
for ignored in ignored_tokens:
ExpectStripped(dumps(ignored)).to_stay_the_same()
for another_ignored in ignored_tokens:
ExpectStripped(dumps(ignored + another_ignored)).to_stay_the_same()
ExpectStripped(dumps("".join(ignored_tokens))).to_stay_the_same()
def does_not_strip_ignored_tokens_embedded_in_the_block_string():
ExpectStripped('""","""').to_stay_the_same()
ExpectStripped('""",,"""').to_stay_the_same()
ExpectStripped('""",|"""').to_stay_the_same()
ignored_tokens_without_formatting = [
token
for token in ignored_tokens
if token not in ["\n", "\r", "\r\n", "\t", " "]
]
for ignored in ignored_tokens_without_formatting:
ExpectStripped('"""|' + ignored + '|"""').to_stay_the_same()
for another_ignored in ignored_tokens_without_formatting:
ExpectStripped(
'"""|' + ignored + another_ignored + '|"""'
).to_stay_the_same()
ExpectStripped(
'"""|' + "".join(ignored_tokens_without_formatting) + '|"""'
).to_stay_the_same()
def strips_ignored_characters_inside_block_strings():
# noinspection PyShadowingNames
def expect_stripped_string(block_str: str):
original_value = lex_value(block_str)
stripped_value = lex_value(strip_ignored_characters(block_str))
assert original_value == stripped_value, dedent(
f"""
Expected lexValue(stripIgnoredCharacters({block_str!r})
to equal {original_value!r}
but got {stripped_value!r}
"""
)
return ExpectStripped(block_str)
expect_stripped_string('""""""').to_stay_the_same()
expect_stripped_string('""" """').to_equal('""""""')
expect_stripped_string('"""a"""').to_stay_the_same()
expect_stripped_string('""" a"""').to_equal('""" a"""')
expect_stripped_string('""" a """').to_equal('""" a """')
expect_stripped_string('"""\n"""').to_equal('""""""')
expect_stripped_string('"""a\nb"""').to_equal('"""a\nb"""')
expect_stripped_string('"""a\rb"""').to_equal('"""a\nb"""')
expect_stripped_string('"""a\r\nb"""').to_equal('"""a\nb"""')
expect_stripped_string('"""a\r\n\nb"""').to_equal('"""a\n\nb"""')
expect_stripped_string('"""\\\n"""').to_stay_the_same()
expect_stripped_string('""""\n"""').to_stay_the_same()
expect_stripped_string('"""\\"""\n"""').to_equal('"""\\""""""')
expect_stripped_string('"""\na\n b"""').to_stay_the_same()
expect_stripped_string('"""\n a\n b"""').to_equal('"""a\nb"""')
expect_stripped_string('"""\na\n b\nc"""').to_equal('"""a\n b\nc"""')
# noinspection PyShadowingNames
def strips_kitchen_sink_query_but_maintains_the_exact_same_ast(
kitchen_sink_query, # noqa: F811
):
stripped_query = strip_ignored_characters(kitchen_sink_query)
assert strip_ignored_characters(stripped_query) == stripped_query
query_ast = parse(kitchen_sink_query, no_location=True)
stripped_ast = parse(stripped_query, no_location=True)
assert stripped_ast == query_ast
# noinspection PyShadowingNames
def strips_kitchen_sink_sdl_but_maintains_the_exact_same_ast(
kitchen_sink_sdl, # noqa: F811
):
stripped_sdl = strip_ignored_characters(kitchen_sink_sdl)
assert strip_ignored_characters(stripped_sdl) == stripped_sdl
sdl_ast = parse(kitchen_sink_sdl, no_location=True)
stripped_ast = parse(stripped_sdl, no_location=True)
assert stripped_ast == sdl_ast
|
graphql-python/graphql-core
|
tests/utilities/test_strip_ignored_characters.py
|
Python
|
mit
| 14,139
|
[
"FEFF"
] |
88b6344b317f0c80dcdabbf7a14bbb63334d195ec8e5f05f63bcba560b59b61d
|
import requests, re, random
from bs4 import BeautifulSoup
COMEDIAN_NAMES = {'Seth': 'Seth Meyers',
'Letterman': 'David Letterman',
'Kimmel': 'Jimmy Kimmel',
'Conan': 'Conan O\'Brian',
'Fallon': 'Jimmy Fallon',
'Ferguson': "Craig Ferguson"}
def get_name(string):
for name in COMEDIAN_NAMES:
        if re.search(name, string):
return COMEDIAN_NAMES[name]
def monologue(text):
"""!monologue: joke from night shows """
match = re.match(r"!monologue", text)
if not match:
return False
monologue_dict = {}
r = requests.get('http://www.newsmax.com/jokes/')
    soup = BeautifulSoup(r.text, 'html.parser')
jokepage = soup.body.find('div', 'jokespage')
for comedian in jokepage.find_all('div'):
if 'jokesHeader' not in comedian.attrs['class']:
break
img_name = comedian.find('img').attrs.get('alt')
comedian_name = get_name(img_name)
monologue = comedian.find_next('p')
        while monologue.name == 'p':
monologue_dict.setdefault(comedian_name, []).append(monologue.text)
monologue = monologue.find_next()
    name = random.choice(list(monologue_dict))
monologue = random.choice(monologue_dict[name])
return monologue + ' --' + name
def hedberg_joke(text):
match = re.match(r"!hedberg", text)
if not match:
return False
url = "https://raw.githubusercontent.com/petdance/scraps/master/mitch-fortunes.txt"
r = requests.get(url)
if r.status_code != 200:
return "Error"
jokes = r.text.split('%')
return random.choice(jokes)
def on_message(msg, server):
text = msg.get("text", "")
return monologue(text) or hedberg_joke(text)
|
mmisiewicz/slask
|
limbo/plugins/monologue.py
|
Python
|
mit
| 1,791
|
[
"Brian"
] |
12d64c772a5e5e21cd89400d4bccf2529e297df0b351c0169dc9efdb6e1c553e
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Iris' data model representation of CF UGrid's Mesh and its constituent parts.
Eventual destination: dedicated module in :mod:`iris` root.
"""
from abc import ABC, abstractmethod
from collections import namedtuple
from collections.abc import Container
from typing import Iterable
from cf_units import Unit
from dask import array as da
import numpy as np
from ... import _lazy_data as _lazy
from ...common import (
CFVariableMixin,
metadata_filter,
metadata_manager_factory,
)
from ...common.metadata import BaseMetadata
from ...config import get_logger
from ...coords import AuxCoord, _DimensionalMetadata
from ...exceptions import ConnectivityNotFoundError, CoordinateNotFoundError
from ...util import array_equal, clip_string, guess_coord_axis
from .metadata import ConnectivityMetadata, MeshCoordMetadata, MeshMetadata
# Configure the logger.
logger = get_logger(__name__, propagate=True, handler=False)
#: Numpy "threshold" printoptions default argument.
NP_PRINTOPTIONS_THRESHOLD = 10
#: Numpy "edgeitems" printoptions default argument.
NP_PRINTOPTIONS_EDGEITEMS = 2
#
# Mesh dimension names namedtuples.
#
#: Namedtuple for 1D mesh topology NetCDF variable dimension names.
Mesh1DNames = namedtuple("Mesh1DNames", ["node_dimension", "edge_dimension"])
#: Namedtuple for 2D mesh topology NetCDF variable dimension names.
Mesh2DNames = namedtuple(
"Mesh2DNames", ["node_dimension", "edge_dimension", "face_dimension"]
)
#
# Mesh coordinate manager namedtuples.
#
#: Namedtuple for 1D mesh :class:`~iris.coords.AuxCoord` coordinates.
Mesh1DCoords = namedtuple(
"Mesh1DCoords", ["node_x", "node_y", "edge_x", "edge_y"]
)
#: Namedtuple for 2D mesh :class:`~iris.coords.AuxCoord` coordinates.
Mesh2DCoords = namedtuple(
"Mesh2DCoords",
["node_x", "node_y", "edge_x", "edge_y", "face_x", "face_y"],
)
#: Namedtuple for ``node`` :class:`~iris.coords.AuxCoord` coordinates.
MeshNodeCoords = namedtuple("MeshNodeCoords", ["node_x", "node_y"])
#: Namedtuple for ``edge`` :class:`~iris.coords.AuxCoord` coordinates.
MeshEdgeCoords = namedtuple("MeshEdgeCoords", ["edge_x", "edge_y"])
#: Namedtuple for ``face`` :class:`~iris.coords.AuxCoord` coordinates.
MeshFaceCoords = namedtuple("MeshFaceCoords", ["face_x", "face_y"])
#
# Mesh connectivity manager namedtuples.
#
#: Namedtuple for 1D mesh :class:`~iris.experimental.ugrid.mesh.Connectivity` instances.
Mesh1DConnectivities = namedtuple("Mesh1DConnectivities", ["edge_node"])
#: Namedtuple for 2D mesh :class:`~iris.experimental.ugrid.mesh.Connectivity` instances.
Mesh2DConnectivities = namedtuple(
"Mesh2DConnectivities",
[
"face_node",
"edge_node",
"face_edge",
"face_face",
"edge_face",
"boundary_node",
],
)
class Connectivity(_DimensionalMetadata):
"""
A CF-UGRID topology connectivity, describing the topological relationship
between two types of mesh element. One or more connectivities make up a
CF-UGRID topology - a constituent of a CF-UGRID mesh.
See: https://ugrid-conventions.github.io/ugrid-conventions
"""
UGRID_CF_ROLES = [
"edge_node_connectivity",
"face_node_connectivity",
"face_edge_connectivity",
"face_face_connectivity",
"edge_face_connectivity",
"boundary_node_connectivity",
"volume_node_connectivity",
"volume_edge_connectivity",
"volume_face_connectivity",
"volume_volume_connectivity",
]
def __init__(
self,
indices,
cf_role,
standard_name=None,
long_name=None,
var_name=None,
units=None,
attributes=None,
start_index=0,
location_axis=0,
):
"""
Constructs a single connectivity.
Args:
* indices (numpy.ndarray or numpy.ma.core.MaskedArray or dask.array.Array):
2D array giving the topological connection relationship between
:attr:`location` elements and :attr:`connected` elements.
The :attr:`location_axis` dimension indexes over the
:attr:`location` dimension of the mesh - i.e. its length matches
the total number of :attr:`location` elements in the mesh. The
:attr:`connected_axis` dimension can be any length, corresponding
to the highest number of :attr:`connected` elements connected to a
:attr:`location` element. The array values are indices into the
:attr:`connected` dimension of the mesh. If the number of
:attr:`connected` elements varies between :attr:`location`
elements: use a :class:`numpy.ma.core.MaskedArray` and mask the
:attr:`location` elements' unused index 'slots'. Use a
:class:`dask.array.Array` to keep indices 'lazy'.
* cf_role (str):
Denotes the topological relationship that this connectivity
describes. Made up of this array's :attr:`location`, and the
:attr:`connected` element type that is indexed by the array.
See :attr:`UGRID_CF_ROLES` for valid arguments.
Kwargs:
* standard_name (str):
CF standard name of the connectivity.
(NOTE: this is not expected by the UGRID conventions, but will be
handled in Iris' standard way if provided).
* long_name (str):
Descriptive name of the connectivity.
* var_name (str):
The NetCDF variable name for the connectivity.
* units (cf_units.Unit):
The :class:`~cf_units.Unit` of the connectivity's values.
Can be a string, which will be converted to a Unit object.
(NOTE: this is not expected by the UGRID conventions, but will be
handled in Iris' standard way if provided).
* attributes (dict):
A dictionary containing other cf and user-defined attributes.
* start_index (int):
Either ``0`` or ``1``. Default is ``0``. Denotes whether
:attr:`indices` uses 0-based or 1-based indexing (allows support
for Fortran and legacy NetCDF files).
* location_axis (int):
Either ``0`` or ``1``. Default is ``0``. Denotes which axis
of :attr:`indices` varies over the :attr:`location` elements (the
alternate axis therefore varying over :attr:`connected` elements).
(This parameter allows support for fastest varying index being
either first or last).
E.g. for ``face_node_connectivity``, for 10 faces:
``indices.shape[location_axis] == 10``.
"""
def validate_arg_vs_list(arg_name, arg, valid_list):
if arg not in valid_list:
error_msg = (
f"Invalid {arg_name} . Got: {arg} . Must be one of: "
f"{valid_list} ."
)
raise ValueError(error_msg)
# Configure the metadata manager.
self._metadata_manager = metadata_manager_factory(ConnectivityMetadata)
validate_arg_vs_list("start_index", start_index, [0, 1])
# indices array will be 2-dimensional, so must be either 0 or 1.
validate_arg_vs_list("location_axis", location_axis, [0, 1])
validate_arg_vs_list("cf_role", cf_role, Connectivity.UGRID_CF_ROLES)
self._metadata_manager.start_index = start_index
self._metadata_manager.location_axis = location_axis
self._metadata_manager.cf_role = cf_role
self._connected_axis = 1 - location_axis
self._location, self._connected = cf_role.split("_")[:2]
super().__init__(
values=indices,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
units=units,
attributes=attributes,
)
@property
def _values(self):
# Overridden just to allow .setter override.
return super()._values
@_values.setter
def _values(self, values):
self._validate_indices(values, shapes_only=True)
# The recommended way of using the setter in super().
super(Connectivity, self.__class__)._values.fset(self, values)
@property
def cf_role(self):
"""
The category of topological relationship that this connectivity
describes.
**Read-only** - validity of :attr:`indices` is dependent on
:attr:`cf_role`. A new :class:`Connectivity` must therefore be defined
if a different :attr:`cf_role` is needed.
"""
return self._metadata_manager.cf_role
@property
def location(self):
"""
Derived from the connectivity's :attr:`cf_role` - the first part, e.g.
``face`` in ``face_node_connectivity``. Refers to the elements that
vary along the :attr:`location_axis` of the connectivity's
:attr:`indices` array.
"""
return self._location
@property
def connected(self):
"""
Derived from the connectivity's :attr:`cf_role` - the second part, e.g.
``node`` in ``face_node_connectivity``. Refers to the elements indexed
by the values in the connectivity's :attr:`indices` array.
"""
return self._connected
@property
def start_index(self):
"""
The base value of the connectivity's :attr:`indices` array; either
``0`` or ``1``.
**Read-only** - validity of :attr:`indices` is dependent on
:attr:`start_index`. A new :class:`Connectivity` must therefore be
defined if a different :attr:`start_index` is needed.
"""
return self._metadata_manager.start_index
@property
def location_axis(self):
"""
The axis of the connectivity's :attr:`indices` array that varies
over the connectivity's :attr:`location` elements. Either ``0`` or ``1``.
**Read-only** - validity of :attr:`indices` is dependent on
:attr:`location_axis`. Use :meth:`transpose` to create a new, transposed
:class:`Connectivity` if a different :attr:`location_axis` is needed.
"""
return self._metadata_manager.location_axis
@property
def connected_axis(self):
"""
Derived as the alternate value of :attr:`location_axis` - each must
equal either ``0`` or ``1``. The axis of the connectivity's
:attr:`indices` array that varies over the :attr:`connected` elements
associated with each :attr:`location` element.
"""
return self._connected_axis
@property
def indices(self):
"""
The index values describing the topological relationship of the
connectivity, as a NumPy array. Masked points indicate a
:attr:`location` element with fewer :attr:`connected` elements than
other :attr:`location` elements described in this array - unused index
'slots' are masked.
**Read-only** - index values are only meaningful when combined with
an appropriate :attr:`cf_role`, :attr:`start_index` and
:attr:`location_axis`. A new :class:`Connectivity` must therefore be
defined if different indices are needed.
"""
return self._values
def indices_by_location(self, indices=None):
"""
Return a view of the indices array with :attr:`location_axis` **always** as
the first axis - transposed if necessary. Can optionally pass in an
identically shaped array on which to perform this operation (e.g. the
output from :meth:`core_indices` or :meth:`lazy_indices`).
Kwargs:
* indices (array):
The array on which to operate. If ``None``, will operate on
:attr:`indices`. Default is ``None``.
Returns:
A view of the indices array, transposed - if necessary - to put
:attr:`location_axis` first.
"""
if indices is None:
indices = self.indices
if indices.shape != self.shape:
raise ValueError(
f"Invalid indices provided. Must be shape={self.shape} , "
f"got shape={indices.shape} ."
)
if self.location_axis == 0:
result = indices
elif self.location_axis == 1:
result = indices.transpose()
else:
raise ValueError("Invalid location_axis.")
return result
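# For example (commented sketch, reusing the hypothetical ``conn`` above,
# whose location_axis is 0):
#
#     conn.indices_by_location().shape              # -> (2, 3): faces first
#     conn.transpose().indices_by_location().shape  # -> (2, 3) again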
def _validate_indices(self, indices, shapes_only=False):
# Use shapes_only=True for a cheaper, less thorough validation of
# indices that inspects only the array shape instead of individual
# masks. It will therefore not catch individual location elements
# having unacceptably low numbers of associated connected elements.
def indices_error(message):
raise ValueError("Invalid indices provided. " + message)
indices = self._sanitise_array(indices, 0)
indices_dtype = indices.dtype
if not np.issubdtype(indices_dtype, np.integer):
indices_error(
f"dtype must be numpy integer subtype, got: {indices_dtype} ."
)
indices_min = indices.min()
if _lazy.is_lazy_data(indices_min):
indices_min = indices_min.compute()
if indices_min < self.start_index:
indices_error(
f"Lowest index: {indices_min} < start_index: {self.start_index} ."
)
indices_shape = indices.shape
if len(indices_shape) != 2:
indices_error(
f"Expected 2-dimensional shape, got: shape={indices_shape} ."
)
len_req_fail = False
if shapes_only:
location_shape = indices_shape[self.connected_axis]
# Wrap as lazy to allow use of the same operations below
# regardless of shapes_only.
location_lengths = _lazy.as_lazy_data(np.asarray(location_shape))
else:
# Wouldn't be safe to use during __init__ validation, since
# lazy_location_lengths requires self.indices to exist. Safe here since
# shapes_only==False is only called manually, i.e. after
# initialisation.
location_lengths = self.lazy_location_lengths()
if self.location in ("edge", "boundary"):
if (location_lengths != 2).any().compute():
len_req_fail = "len=2"
else:
if self.location == "face":
min_size = 3
elif self.location == "volume":
if self.connected == "edge":
min_size = 6
else:
min_size = 4
else:
raise NotImplementedError
if (location_lengths < min_size).any().compute():
len_req_fail = f"len>={min_size}"
if len_req_fail:
indices_error(
f"Not all {self.location}s meet requirement: {len_req_fail} - "
f"needed to describe '{self.cf_role}' ."
)
def validate_indices(self):
"""
Perform a thorough validity check of this connectivity's
:attr:`indices`. Includes checking the number of :attr:`connected`
elements associated with each :attr:`location` element (specified using
masks on the :attr:`indices` array) against the :attr:`cf_role`.
Raises a ``ValueError`` if any problems are encountered, otherwise
passes silently.
.. note::
While this uses lazy computation, it will still be a high
resource demand for a large :attr:`indices` array.
"""
self._validate_indices(self.indices, shapes_only=False)
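# For example (commented sketch): a face described by only 2 unmasked
# nodes fails the ``len>=3`` requirement of "face_node_connectivity":
#
#     bad = np.ma.masked_array(
#         [[0, 1, 2], [1, 3, 2]],
#         mask=[[False, False, True], [False, False, False]],
#     )
#     Connectivity(bad, "face_node_connectivity").validate_indices()
#     # -> ValueError: Not all faces meet requirement: len>=3 ...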
def __eq__(self, other):
eq = NotImplemented
if isinstance(other, Connectivity):
# Account for the fact that other could be the transposed equivalent
# of self, which we consider 'safe' since the recommended
# interaction with the indices array is via indices_by_location, which
# corrects for this difference. (To enable this, location_axis does
# not participate in ConnectivityMetadata to ConnectivityMetadata
# equivalence).
if hasattr(other, "metadata"):
# metadata comparison
eq = self.metadata == other.metadata
if eq:
eq = (
self.shape == other.shape
and self.location_axis == other.location_axis
) or (
self.shape == other.shape[::-1]
and self.location_axis == other.connected_axis
)
if eq:
eq = array_equal(
self.indices_by_location(self.core_indices()),
other.indices_by_location(other.core_indices()),
)
return eq
def transpose(self):
"""
Create a new :class:`Connectivity`, identical to this one but with the
:attr:`indices` array transposed and the :attr:`location_axis` value flipped.
Returns:
A new :class:`Connectivity` that is the transposed equivalent of
the original.
"""
new_connectivity = Connectivity(
indices=self.indices.transpose().copy(),
cf_role=self.cf_role,
standard_name=self.standard_name,
long_name=self.long_name,
var_name=self.var_name,
units=self.units,
attributes=self.attributes,
start_index=self.start_index,
location_axis=self.connected_axis,
)
return new_connectivity
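# For example (commented sketch, continuing the hypothetical ``conn``):
#
#     flipped = conn.transpose()
#     flipped.shape          # -> (3, 2)
#     flipped.location_axis  # -> 1
#     flipped == conn        # -> True: transposed equivalents compare equal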
def lazy_indices(self):
"""
Return a lazy array representing the connectivity's indices.
Accessing this method will never cause the :attr:`indices` values to be
loaded. Similarly, calling methods on, or indexing, the returned Array
will not cause the connectivity to have loaded :attr:`indices`.
If the :attr:`indices` have already been loaded for the connectivity,
the returned Array will be a new lazy array wrapper.
Returns:
A lazy array, representing the connectivity indices array.
"""
return super()._lazy_values()
def core_indices(self):
"""
The indices array at the core of this connectivity, which may be a
NumPy array or a Dask array.
Returns:
numpy.ndarray or numpy.ma.core.MaskedArray or dask.array.Array
"""
return super()._core_values()
def has_lazy_indices(self):
"""
Return a boolean indicating whether the connectivity's :attr:`indices`
array is a lazy Dask array or not.
Returns:
boolean
"""
return super()._has_lazy_values()
def lazy_location_lengths(self):
"""
Return a lazy array representing the number of :attr:`connected`
elements associated with each of the connectivity's :attr:`location`
elements, accounting for masks if present.
Accessing this method will never cause the :attr:`indices` values to be
loaded. Similarly, calling methods on, or indexing, the returned Array
will not cause the connectivity to have loaded :attr:`indices`.
The returned Array will be lazy regardless of whether the
:attr:`indices` have already been loaded.
Returns:
A lazy array, representing the number of :attr:`connected`
elements associated with each :attr:`location` element.
"""
location_mask_counts = da.sum(
da.ma.getmaskarray(self.indices), axis=self.connected_axis
)
max_location_size = self.indices.shape[self.connected_axis]
return max_location_size - location_mask_counts
def location_lengths(self):
"""
Return a NumPy array representing the number of :attr:`connected`
elements associated with each of the connectivity's :attr:`location`
elements, accounting for masks if present.
Returns:
A NumPy array, representing the number of :attr:`connected`
elements associated with each :attr:`location` element.
"""
return self.lazy_location_lengths().compute()
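# For example (commented sketch): masked index 'slots' are excluded from
# the per-location counts:
#
#     masked = np.ma.masked_array(
#         [[0, 1, 2, 3], [1, 3, 2, 0]],
#         mask=[[False] * 4, [False, False, False, True]],
#     )
#     Connectivity(masked, "face_node_connectivity").location_lengths()
#     # -> array([4, 3])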
def cube_dims(self, cube):
"""Not available on :class:`Connectivity`."""
raise NotImplementedError
def xml_element(self, doc):
# Create the XML element as the camelCaseEquivalent of the
# class name
element = super().xml_element(doc)
element.setAttribute("cf_role", self.cf_role)
element.setAttribute("start_index", self.start_index)
element.setAttribute("location_axis", self.location_axis)
return element
class Mesh(CFVariableMixin):
"""
A container representing the UGRID ``cf_role`` ``mesh_topology``, supporting
1D network, 2D triangular, and 2D flexible mesh topologies.
.. note::
The 3D layered and fully 3D unstructured mesh topologies are not supported
at this time.
.. seealso::
The UGRID Conventions, https://ugrid-conventions.github.io/ugrid-conventions/
"""
# TBD: for volume and/or z-axis support include axis "z" and/or dimension "3"
#: The supported mesh axes.
AXES = ("x", "y")
#: Valid range of values for ``topology_dimension``.
TOPOLOGY_DIMENSIONS = (1, 2)
#: Valid mesh elements.
ELEMENTS = ("edge", "node", "face")
def __init__(
self,
topology_dimension,
node_coords_and_axes,
connectivities,
edge_coords_and_axes=None,
face_coords_and_axes=None,
standard_name=None,
long_name=None,
var_name=None,
units=None,
attributes=None,
node_dimension=None,
edge_dimension=None,
face_dimension=None,
):
"""
.. note::
The purpose of the :attr:`node_dimension`, :attr:`edge_dimension` and
:attr:`face_dimension` properties is to preserve the original NetCDF
variable dimension names. Note that only :attr:`edge_dimension` and
:attr:`face_dimension` are UGRID attributes, and they are only present
for :attr:`topology_dimension` ``>=2``.
"""
# TODO: support volumes.
# TODO: support (coord, "z")
self._metadata_manager = metadata_manager_factory(MeshMetadata)
# topology_dimension is read-only, so assign directly to the metadata manager
if topology_dimension not in self.TOPOLOGY_DIMENSIONS:
emsg = f"Expected 'topology_dimension' in range {self.TOPOLOGY_DIMENSIONS!r}, got {topology_dimension!r}."
raise ValueError(emsg)
self._metadata_manager.topology_dimension = topology_dimension
self.node_dimension = node_dimension
self.edge_dimension = edge_dimension
self.face_dimension = face_dimension
# assign the metadata to the metadata manager
self.standard_name = standard_name
self.long_name = long_name
self.var_name = var_name
self.units = units
self.attributes = attributes
# based on the topology_dimension, create the appropriate coordinate manager
def normalise(element, axis):
result = str(axis).lower()
if result not in self.AXES:
emsg = f"Invalid axis specified for {element} coordinate {coord.name()!r}, got {axis!r}."
raise ValueError(emsg)
return f"{element}_{result}"
if not isinstance(node_coords_and_axes, Iterable):
node_coords_and_axes = [node_coords_and_axes]
if not isinstance(connectivities, Iterable):
connectivities = [connectivities]
kwargs = {}
for coord, axis in node_coords_and_axes:
kwargs[normalise("node", axis)] = coord
if edge_coords_and_axes is not None:
for coord, axis in edge_coords_and_axes:
kwargs[normalise("edge", axis)] = coord
if face_coords_and_axes is not None:
for coord, axis in face_coords_and_axes:
kwargs[normalise("face", axis)] = coord
# check the UGRID minimum requirement for coordinates
if "node_x" not in kwargs:
emsg = (
"Require a node coordinate that is x-axis like to be provided."
)
raise ValueError(emsg)
if "node_y" not in kwargs:
emsg = (
"Require a node coordinate that is y-axis like to be provided."
)
raise ValueError(emsg)
if self.topology_dimension == 1:
self._coord_manager = _Mesh1DCoordinateManager(**kwargs)
self._connectivity_manager = _Mesh1DConnectivityManager(
*connectivities
)
elif self.topology_dimension == 2:
self._coord_manager = _Mesh2DCoordinateManager(**kwargs)
self._connectivity_manager = _Mesh2DConnectivityManager(
*connectivities
)
else:
emsg = f"Unsupported 'topology_dimension', got {topology_dimension!r}."
raise NotImplementedError(emsg)
@classmethod
def from_coords(cls, *coords):
"""
Construct a :class:`Mesh` by derivation from one or more
:class:`~iris.coords.Coord`\\ s.
The :attr:`~Mesh.topology_dimension`, :class:`~iris.coords.Coord`
membership and :class:`Connectivity` membership are all determined
based on the shape of the first :attr:`~iris.coords.Coord.bounds`:
* ``None`` or ``(n, <2)``:
Not supported
* ``(n, 2)``:
:attr:`~Mesh.topology_dimension` = ``1``.
:attr:`~Mesh.node_coords` and :attr:`~Mesh.edge_node_connectivity`
constructed from :attr:`~iris.coords.Coord.bounds`.
:attr:`~Mesh.edge_coords` constructed from
:attr:`~iris.coords.Coord.points`.
* ``(n, >=3)``:
:attr:`~Mesh.topology_dimension` = ``2``.
:attr:`~Mesh.node_coords` and :attr:`~Mesh.face_node_connectivity`
constructed from :attr:`~iris.coords.Coord.bounds`.
:attr:`~Mesh.face_coords` constructed from
:attr:`~iris.coords.Coord.points`.
Args:
* \\*coords (Iterable of :class:`~iris.coords.Coord`):
Coordinates to pass into the :class:`Mesh`.
All :attr:`~iris.coords.Coord.points` must have the same shapes;
all :attr:`~iris.coords.Coord.bounds` must have the same shapes,
and must not be ``None``.
Returns:
:class:`Mesh`
.. note::
Any resulting duplicate nodes are not currently removed, due to the
computational intensity.
.. note::
:class:`Mesh` currently requires ``X`` and ``Y``
:class:`~iris.coords.Coord`\\ s specifically.
:meth:`iris.util.guess_coord_axis` is therefore attempted, else the
first two :class:`~iris.coords.Coord`\\ s are taken.
.. testsetup::
from iris import load_cube, sample_data_path
from iris.experimental.ugrid import (
PARSE_UGRID_ON_LOAD,
Mesh,
MeshCoord,
)
file_path = sample_data_path("mesh_C4_synthetic_float.nc")
with PARSE_UGRID_ON_LOAD.context():
cube_w_mesh = load_cube(file_path)
For example::
# Reconstruct a cube-with-mesh after subsetting it.
>>> print(cube_w_mesh.mesh.name())
Topology data of 2D unstructured mesh
>>> mesh_coord_names = [
... coord.name() for coord in cube_w_mesh.coords(mesh_coords=True)
... ]
>>> print(f"MeshCoords: {mesh_coord_names}")
MeshCoords: ['latitude', 'longitude']
# Subsetting converts MeshCoords to AuxCoords.
>>> slices = [slice(None)] * cube_w_mesh.ndim
>>> slices[cube_w_mesh.mesh_dim()] = slice(-1)
>>> cube_sub = cube_w_mesh[tuple(slices)]
>>> print(cube_sub.mesh)
None
>>> orig_coords = [cube_sub.coord(c_name) for c_name in mesh_coord_names]
>>> for coord in orig_coords:
... print(f"{coord.name()}: {type(coord).__name__}")
latitude: AuxCoord
longitude: AuxCoord
>>> new_mesh = Mesh.from_coords(*orig_coords)
>>> new_coords = new_mesh.to_MeshCoords(location=cube_w_mesh.location)
# Replace the AuxCoords with MeshCoords.
>>> for ix in range(2):
... cube_sub.remove_coord(orig_coords[ix])
... cube_sub.add_aux_coord(new_coords[ix], cube_w_mesh.mesh_dim())
>>> print(cube_sub.mesh.name())
Topology data of 2D unstructured mesh
>>> for coord_name in mesh_coord_names:
... coord = cube_sub.coord(coord_name)
... print(f"{coord_name}: {type(coord).__name__}")
latitude: MeshCoord
longitude: MeshCoord
"""
# Validate points and bounds shape match.
def check_shape(array_name):
attr_name = f"core_{array_name}"
arrays = [getattr(coord, attr_name)() for coord in coords]
if any(a is None for a in arrays):
message = (
f"{array_name} missing from coords[{arrays.index(None)}] ."
)
raise ValueError(message)
shapes = [array.shape for array in arrays]
if shapes.count(shapes[0]) != len(shapes):
message = (
f"{array_name} shapes are not identical for all "
f"coords."
)
raise ValueError(message)
for array in ("points", "bounds"):
check_shape(array)
# Determine dimensionality, using first coord.
first_coord = coords[0]
ndim = first_coord.ndim
if ndim != 1:
message = f"Expected coordinate ndim == 1, got: f{ndim} ."
raise ValueError(message)
bounds_shape = first_coord.core_bounds().shape
bounds_dim1 = bounds_shape[1]
if bounds_dim1 < 2:
message = (
f"Expected coordinate bounds.shape (n, >"
f"=2), got: {bounds_shape} ."
)
raise ValueError(message)
elif bounds_dim1 == 2:
topology_dimension = 1
coord_centring = "edge"
conn_cf_role = "edge_node_connectivity"
else:
topology_dimension = 2
coord_centring = "face"
conn_cf_role = "face_node_connectivity"
# Create connectivity.
if first_coord.has_lazy_bounds():
array_lib = da
else:
array_lib = np
indices = array_lib.arange(np.prod(bounds_shape)).reshape(bounds_shape)
masking = array_lib.ma.getmaskarray(first_coord.core_bounds())
indices = array_lib.ma.masked_array(indices, masking)
connectivity = Connectivity(indices, conn_cf_role)
# Create coords.
node_coords = []
centre_coords = []
for coord in coords:
coord_kwargs = dict(
standard_name=coord.standard_name,
long_name=coord.long_name,
units=coord.units,
attributes=coord.attributes,
)
node_points = array_lib.ma.filled(
coord.core_bounds(), 0.0
).flatten()
node_coords.append(AuxCoord(points=node_points, **coord_kwargs))
centre_points = coord.core_points()
centre_coords.append(
AuxCoord(points=centre_points, **coord_kwargs)
)
#####
# TODO: remove axis assignment once Mesh supports arbitrary coords.
axes_present = [guess_coord_axis(coord) for coord in coords]
axes_required = ("X", "Y")
if all([req in axes_present for req in axes_required]):
axis_indices = [axes_present.index(req) for req in axes_required]
else:
message = (
"Unable to find 'X' and 'Y' using guess_coord_axis. Assuming "
"X=coords[0], Y=coords[1] ."
)
# TODO: reconsider logging level when we have consistent practice.
logger.info(message, extra=dict(cls=None))
axis_indices = range(len(axes_required))
def axes_assign(coord_list):
coords_sorted = [coord_list[ix] for ix in axis_indices]
return zip(coords_sorted, axes_required)
node_coords_and_axes = axes_assign(node_coords)
centre_coords_and_axes = axes_assign(centre_coords)
#####
# Construct the Mesh.
mesh_kwargs = dict(
topology_dimension=topology_dimension,
node_coords_and_axes=node_coords_and_axes,
connectivities=[connectivity],
)
mesh_kwargs[
f"{coord_centring}_coords_and_axes"
] = centre_coords_and_axes
return cls(**mesh_kwargs)
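# A minimal from_coords sketch (commented; hypothetical 1-D coords with
# (n, 4) bounds - i.e. quadrilateral faces - giving topology_dimension 2):
#
#     x = AuxCoord([0.5, 1.5], bounds=[[0, 1, 1, 0], [1, 2, 2, 1]],
#                  standard_name="longitude", units="degrees")
#     y = AuxCoord([0.5, 0.5], bounds=[[0, 0, 1, 1], [0, 0, 1, 1]],
#                  standard_name="latitude", units="degrees")
#     mesh = Mesh.from_coords(x, y)
#     mesh.topology_dimension  # -> 2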
def __eq__(self, other):
result = NotImplemented
if isinstance(other, Mesh):
result = self.metadata == other.metadata
if result:
result = self.all_coords == other.all_coords
if result:
result = self.all_connectivities == other.all_connectivities
return result
def __hash__(self):
# Allow use in sets and as dictionary keys, as is done for :class:`iris.cube.Cube`.
# See https://github.com/SciTools/iris/pull/1772
return hash(id(self))
def __getstate__(self):
return (
self._metadata_manager,
self._coord_manager,
self._connectivity_manager,
)
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def summary(self, shorten=False):
"""
Return a string representation of the Mesh.
Parameters
----------
shorten : bool, default = False
If True, produce a one-line summary of the form ``<Mesh: ...>``.
If False, produce a multi-line detailed printout.
Returns
-------
result : str
"""
if shorten:
result = self._summary_oneline()
else:
result = self._summary_multiline()
return result
def __repr__(self):
return self.summary(shorten=True)
def __str__(self):
return self.summary(shorten=False)
def _summary_oneline(self):
# We use the repr output to produce a short one-line identity summary,
# similar to the default object.__str__ output "<object at xxx>".
# This form is also used in other str() constructions, like MeshCoord.
# By contrast, _summary_multiline (below) produces a readable
# multi-line printout.
mesh_name = self.name()
if mesh_name in (None, "", "unknown"):
mesh_name = None
if mesh_name:
# Use a more human-readable form
mesh_string = f"<Mesh: '{mesh_name}'>"
else:
# Mimic the generic object.__str__ style.
mesh_id = id(self)
mesh_string = f"<Mesh object at {hex(mesh_id)}>"
return mesh_string
def _summary_multiline(self):
# Produce a readable multi-line summary of the Mesh content.
lines = []
n_indent = 4
indent_str = " " * n_indent
def line(text, i_indent=0):
indent = indent_str * i_indent
lines.append(f"{indent}{text}")
line(f"Mesh : '{self.name()}'")
line(f"topology_dimension: {self.topology_dimension}", 1)
for element in ("node", "edge", "face"):
if element == "node":
element_exists = True
else:
main_conn_name = f"{element}_node_connectivity"
main_conn = getattr(self, main_conn_name, None)
element_exists = main_conn is not None
if element_exists:
# Include a section for this element
line(element, 1)
# Print element dimension
dim_name = f"{element}_dimension"
dim = getattr(self, dim_name)
line(f"{dim_name}: '{dim}'", 2)
# Print defining connectivity (except node)
if element != "node":
main_conn_string = main_conn.summary(
shorten=True, linewidth=0
)
line(f"{main_conn_name}: {main_conn_string}", 2)
# Print coords
include_key = f"include_{element}s"
coords = self.coords(**{include_key: True})
if coords:
line(f"{element} coordinates", 2)
for coord in coords:
coord_string = coord.summary(shorten=True, linewidth=0)
line(coord_string, 3)
# Having dealt with essential info, now add any optional connectivities
# N.B. includes boundaries: as optional connectivity, not an "element"
optional_conn_names = (
"boundary_connectivity",
"face_face_connectivity",
"face_edge_connectivity",
"edge_face_connectivity",
)
optional_conns = [
getattr(self, name, None) for name in optional_conn_names
]
optional_conns = {
name: conn
for conn, name in zip(optional_conns, optional_conn_names)
if conn is not None
}
if optional_conns:
line("optional connectivities", 1)
for name, conn in optional_conns.items():
conn_string = conn.summary(shorten=True, linewidth=0)
line(f"{name}: {conn_string}", 2)
# Output the detail properties, basically those from CFVariableMixin
for name in BaseMetadata._members:
val = getattr(self, name, None)
if val is not None:
if name == "units":
show = val.origin != Unit(None)
elif isinstance(val, Container):
show = bool(val)
else:
show = val is not None
if show:
if name == "attributes":
# Use a multi-line form for this.
line("attributes:", 1)
max_attname_len = max(len(attr) for attr in val.keys())
for attrname, attrval in val.items():
attrname = attrname.ljust(max_attname_len)
if isinstance(attrval, str):
# quote strings
attrval = repr(attrval)
# and abbreviate really long ones
attrval = clip_string(attrval)
attr_string = f"{attrname} {attrval}"
line(attr_string, 2)
else:
line(f"{name}: {val!r}", 1)
result = "\n".join(lines)
return result
def __setstate__(self, state):
metadata_manager, coord_manager, connectivity_manager = state
self._metadata_manager = metadata_manager
self._coord_manager = coord_manager
self._connectivity_manager = connectivity_manager
def _set_dimension_names(self, node, edge, face, reset=False):
args = (node, edge, face)
currents = (
self.node_dimension,
self.edge_dimension,
self.face_dimension,
)
zipped = zip(args, currents)
if reset:
node, edge, face = [
None if arg else current for arg, current in zipped
]
else:
node, edge, face = [arg or current for arg, current in zipped]
self.node_dimension = node
self.edge_dimension = edge
self.face_dimension = face
if self.topology_dimension == 1:
result = Mesh1DNames(self.node_dimension, self.edge_dimension)
elif self.topology_dimension == 2:
result = Mesh2DNames(
self.node_dimension, self.edge_dimension, self.face_dimension
)
else:
message = (
f"Unsupported topology_dimension: {self.topology_dimension} ."
)
raise NotImplementedError(message)
return result
@property
def all_connectivities(self):
"""
All the :class:`~iris.experimental.ugrid.mesh.Connectivity` instances
of the :class:`Mesh`.
"""
return self._connectivity_manager.all_members
@property
def all_coords(self):
"""
All the :class:`~iris.coords.AuxCoord` coordinates of the :class:`Mesh`.
"""
return self._coord_manager.all_members
@property
def boundary_node_connectivity(self):
"""
The *optional* UGRID ``boundary_node_connectivity``
:class:`~iris.experimental.ugrid.mesh.Connectivity` of the
:class:`Mesh`.
"""
return self._connectivity_manager.boundary_node
@property
def edge_coords(self):
"""
The *optional* UGRID ``edge`` :class:`~iris.coords.AuxCoord` coordinates
of the :class:`Mesh`.
"""
return self._coord_manager.edge_coords
@property
def edge_dimension(self):
"""
The *optionally required* UGRID NetCDF variable name for the ``edge``
dimension.
"""
return self._metadata_manager.edge_dimension
@edge_dimension.setter
def edge_dimension(self, name):
if not name or not isinstance(name, str):
edge_dimension = f"Mesh{self.topology_dimension}d_edge"
else:
edge_dimension = name
self._metadata_manager.edge_dimension = edge_dimension
@property
def edge_face_connectivity(self):
"""
The *optional* UGRID ``edge_face_connectivity``
:class:`~iris.experimental.ugrid.mesh.Connectivity` of the
:class:`Mesh`.
"""
return self._connectivity_manager.edge_face
@property
def edge_node_connectivity(self):
"""
The UGRID ``edge_node_connectivity``
:class:`~iris.experimental.ugrid.mesh.Connectivity` of the
:class:`Mesh`, which is **required** for :attr:`Mesh.topology_dimension`
of ``1``, and *optionally required* for
:attr:`Mesh.topology_dimension` ``>=2``.
"""
return self._connectivity_manager.edge_node
@property
def face_coords(self):
"""
The *optional* UGRID ``face`` :class:`~iris.coords.AuxCoord` coordinates
of the :class:`Mesh`.
"""
return self._coord_manager.face_coords
@property
def face_dimension(self):
"""
The *optionally required* UGRID NetCDF variable name for the ``face``
dimension.
"""
return self._metadata_manager.face_dimension
@face_dimension.setter
def face_dimension(self, name):
if self.topology_dimension < 2:
face_dimension = None
if name:
# Tell the user it is not being set if they expected otherwise.
message = (
"Not setting face_dimension (inappropriate for "
f"topology_dimension={self.topology_dimension} ."
)
logger.debug(message, extra=dict(cls=self.__class__.__name__))
elif not name or not isinstance(name, str):
face_dimension = f"Mesh{self.topology_dimension}d_face"
else:
face_dimension = name
self._metadata_manager.face_dimension = face_dimension
@property
def face_edge_connectivity(self):
"""
The *optional* UGRID ``face_edge_connectivity``
:class:`~iris.experimental.ugrid.mesh.Connectivity` of the
:class:`Mesh`.
"""
# optional
return self._connectivity_manager.face_edge
@property
def face_face_connectivity(self):
"""
The *optional* UGRID ``face_face_connectivity``
:class:`~iris.experimental.ugrid.mesh.Connectivity` of the
:class:`Mesh`.
"""
return self._connectivity_manager.face_face
@property
def face_node_connectivity(self):
"""
The UGRID ``face_node_connectivity``
:class:`~iris.experimental.ugrid.mesh.Connectivity` of the
:class:`Mesh`, which is **required** for :attr:`Mesh.topology_dimension`
of ``2``, and *optionally required* for :attr:`Mesh.topology_dimension`
of ``3``.
"""
return self._connectivity_manager.face_node
@property
def node_coords(self):
"""
The **required** UGRID ``node`` :class:`~iris.coords.AuxCoord` coordinates
of the :class:`Mesh`.
"""
return self._coord_manager.node_coords
@property
def node_dimension(self):
"""The NetCDF variable name for the ``node`` dimension."""
return self._metadata_manager.node_dimension
@node_dimension.setter
def node_dimension(self, name):
if not name or not isinstance(name, str):
node_dimension = f"Mesh{self.topology_dimension}d_node"
else:
node_dimension = name
self._metadata_manager.node_dimension = node_dimension
def add_connectivities(self, *connectivities):
"""
Add one or more :class:`~iris.experimental.ugrid.mesh.Connectivity` instances to the :class:`Mesh`.
Args:
* connectivities (iterable of object):
A collection of one or more
:class:`~iris.experimental.ugrid.mesh.Connectivity` instances to
add to the :class:`Mesh`.
"""
self._connectivity_manager.add(*connectivities)
def add_coords(
self,
node_x=None,
node_y=None,
edge_x=None,
edge_y=None,
face_x=None,
face_y=None,
):
"""
Add one or more :class:`~iris.coords.AuxCoord` coordinates to the :class:`Mesh`.
Kwargs:
* node_x (object):
The ``x-axis`` like ``node`` :class:`~iris.coords.AuxCoord`.
* node_y (object):
The ``y-axis`` like ``node`` :class:`~iris.coords.AuxCoord`.
* edge_x (object):
The ``x-axis`` like ``edge`` :class:`~iris.coords.AuxCoord`.
* edge_y (object):
The ``y-axis`` like ``edge`` :class:`~iris.coords.AuxCoord`.
* face_x (object):
The ``x-axis`` like ``face`` :class:`~iris.coords.AuxCoord`.
* face_y (object):
The ``y-axis`` like ``face`` :class:`~iris.coords.AuxCoord`.
"""
# Filter out absent arguments - only expecting face coords sometimes,
# same will be true of volumes in future.
kwargs = {
"node_x": node_x,
"node_y": node_y,
"edge_x": edge_x,
"edge_y": edge_y,
"face_x": face_x,
"face_y": face_y,
}
kwargs = {k: v for k, v in kwargs.items() if v}
self._coord_manager.add(**kwargs)
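# For example (commented sketch; hypothetical ``face_lons``/``face_lats``
# arrays whose lengths match the mesh's face count):
#
#     mesh.add_coords(
#         face_x=AuxCoord(face_lons, standard_name="longitude"),
#         face_y=AuxCoord(face_lats, standard_name="latitude"),
#     )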
def connectivities(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
cf_role=None,
contains_node=None,
contains_edge=None,
contains_face=None,
):
"""
Return all :class:`~iris.experimental.ugrid.mesh.Connectivity`
instances from the :class:`Mesh` that match the provided criteria.
Criteria can be either specific properties or other objects with
metadata to be matched.
.. seealso::
:meth:`Mesh.connectivity` for matching exactly one connectivity.
Kwargs:
* item (str or object):
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a connectivity or metadata instance equal to that of
the desired objects e.g.,
:class:`~iris.experimental.ugrid.mesh.Connectivity` or
:class:`~iris.experimental.ugrid.metadata.ConnectivityMetadata`.
* standard_name (str):
The CF standard name of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``standard_name``.
* long_name (str):
An unconstrained description of the
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``long_name``.
* var_name (str):
The NetCDF variable name of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``var_name``.
* attributes (dict):
A dictionary of attributes desired on the
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``attributes``.
* cf_role (str):
The UGRID ``cf_role`` of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`.
* contains_node (bool):
Contains the ``node`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched.
* contains_edge (bool):
Contains the ``edge`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched.
* contains_face (bool):
Contains the ``face`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched.
Returns:
A list of :class:`~iris.experimental.ugrid.mesh.Connectivity`
instances from the :class:`Mesh` that matched the given criteria.
"""
result = self._connectivity_manager.filters(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
cf_role=cf_role,
contains_node=contains_node,
contains_edge=contains_edge,
contains_face=contains_face,
)
return list(result.values())
def connectivity(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
cf_role=None,
contains_node=None,
contains_edge=None,
contains_face=None,
):
"""
Return a single :class:`~iris.experimental.ugrid.mesh.Connectivity`
from the :class:`Mesh` that matches the provided criteria.
Criteria can be either specific properties or other objects with
metadata to be matched.
.. note::
If the given criteria do not return **precisely one**
:class:`~iris.experimental.ugrid.mesh.Connectivity`, then a
:class:`~iris.exceptions.ConnectivityNotFoundError` is raised.
.. seealso::
:meth:`Mesh.connectivities` for matching zero or more connectivities.
Kwargs:
* item (str or object):
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a connectivity or metadata instance equal to that of
the desired object e.g.,
:class:`~iris.experimental.ugrid.mesh.Connectivity` or
:class:`~iris.experimental.ugrid.metadata.ConnectivityMetadata`.
* standard_name (str):
The CF standard name of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``standard_name``.
* long_name (str):
An unconstrained description of the
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``long_name``.
* var_name (str):
The NetCDF variable name of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``var_name``.
* attributes (dict):
A dictionary of attributes desired on the
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``attributes``.
* cf_role (str):
The UGRID ``cf_role`` of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`.
* contains_node (bool):
Contains the ``node`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched.
* contains_edge (bool):
Contains the ``edge`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched.
* contains_face (bool):
Contains the ``face`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched.
Returns:
The :class:`~iris.experimental.ugrid.mesh.Connectivity` from the
:class:`Mesh` that matched the given criteria.
"""
result = self._connectivity_manager.filter(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
cf_role=cf_role,
contains_node=contains_node,
contains_edge=contains_edge,
contains_face=contains_face,
)
return list(result.values())[0]
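# For example (commented sketch): fetch the defining face connectivity
# of a 2-D mesh, or match several by element membership:
#
#     fnc = mesh.connectivity(cf_role="face_node_connectivity")
#     edge_conns = mesh.connectivities(contains_edge=True)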
def coord(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
include_nodes=None,
include_edges=None,
include_faces=None,
):
"""
Return a single :class:`~iris.coords.AuxCoord` coordinate from the
:class:`Mesh` that matches the provided criteria.
Criteria can be either specific properties or other objects with
metadata to be matched.
.. note::
If the given criteria do not return **precisely one** coordinate,
then a :class:`~iris.exceptions.CoordinateNotFoundError` is raised.
.. seealso::
:meth:`Mesh.coords` for matching zero or more coordinates.
Kwargs:
* item (str or object):
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a coordinate or metadata instance equal to that of
the desired coordinate e.g., :class:`~iris.coords.AuxCoord` or
:class:`~iris.common.metadata.CoordMetadata`.
* standard_name (str):
The CF standard name of the desired coordinate. If ``None``, does not
check for ``standard_name``.
* long_name (str):
An unconstrained description of the coordinate. If ``None``, does not
check for ``long_name``.
* var_name (str):
The NetCDF variable name of the desired coordinate. If ``None``, does
not check for ``var_name``.
* attributes (dict):
A dictionary of attributes desired on the coordinates. If ``None``,
does not check for ``attributes``.
* axis (str):
The desired coordinate axis, see :func:`~iris.util.guess_coord_axis`.
If ``None``, does not check for ``axis``. Accepts the values ``X``,
``Y``, ``Z`` and ``T`` (case-insensitive).
* include_nodes (bool):
Include all ``node`` coordinates in the list of objects to be matched.
* include_edges (bool):
Include all ``edge`` coordinates in the list of objects to be matched.
* include_faces (bool):
Include all ``face`` coordinates in the list of objects to be matched.
Returns:
The :class:`~iris.coords.AuxCoord` coordinate from the :class:`Mesh`
that matched the given criteria.
"""
result = self._coord_manager.filter(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
axis=axis,
include_nodes=include_nodes,
include_edges=include_edges,
include_faces=include_faces,
)
return list(result.values())[0]
def coords(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
include_nodes=None,
include_edges=None,
include_faces=None,
):
"""
Return all :class:`~iris.coords.AuxCoord` coordinates from the :class:`Mesh` that
match the provided criteria.
Criteria can be either specific properties or other objects with
metadata to be matched.
.. seealso::
:meth:`Mesh.coord` for matching exactly one coordinate.
Kwargs:
* item (str or object):
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a coordinate or metadata instance equal to that of
the desired coordinates e.g., :class:`~iris.coords.AuxCoord` or
:class:`~iris.common.metadata.CoordMetadata`.
* standard_name (str):
The CF standard name of the desired coordinate. If ``None``, does not
check for ``standard_name``.
* long_name (str):
An unconstrained description of the coordinate. If ``None``, does not
check for ``long_name``.
* var_name (str):
The NetCDF variable name of the desired coordinate. If ``None``, does
not check for ``var_name``.
* attributes (dict):
A dictionary of attributes desired on the coordinates. If ``None``,
does not check for ``attributes``.
* axis (str):
The desired coordinate axis, see :func:`~iris.util.guess_coord_axis`.
If ``None``, does not check for ``axis``. Accepts the values ``X``,
``Y``, ``Z`` and ``T`` (case-insensitive).
* include_nodes (bool):
Include all ``node`` coordinates in the list of objects to be matched.
* include_edges (bool):
Include all ``edge`` coordinates in the list of objects to be matched.
* include_faces (bool):
Include all ``face`` coordinates in the list of objects to be matched.
Returns:
A list of :class:`~iris.coords.AuxCoord` coordinates from the
:class:`Mesh` that matched the given criteria.
"""
result = self._coord_manager.filters(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
axis=axis,
include_nodes=include_nodes,
include_edges=include_edges,
include_faces=include_faces,
)
return list(result.values())
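# For example (commented sketch): select only the node coordinates, or
# only the x-axis coordinates across all elements:
#
#     node_coords = mesh.coords(include_nodes=True)
#     x_coords = mesh.coords(axis="x")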
def remove_connectivities(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
cf_role=None,
contains_node=None,
contains_edge=None,
contains_face=None,
):
"""
Remove one or more :class:`~iris.experimental.ugrid.mesh.Connectivity`
from the :class:`Mesh` that match the provided criteria.
Criteria can be either specific properties or other objects with
metadata to be matched.
Kwargs:
* item (str or object):
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a connectivity or metadata instance equal to that of
the desired objects e.g.,
:class:`~iris.experimental.ugrid.mesh.Connectivity` or
:class:`~iris.experimental.ugrid.metadata.ConnectivityMetadata`.
* standard_name (str):
The CF standard name of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``standard_name``.
* long_name (str):
An unconstrained description of the
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``long_name``.
* var_name (str):
The NetCDF variable name of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``var_name``.
* attributes (dict):
A dictionary of attributes desired on the
:class:`~iris.experimental.ugrid.mesh.Connectivity`. If ``None``,
does not check for ``attributes``.
* cf_role (str):
The UGRID ``cf_role`` of the desired
:class:`~iris.experimental.ugrid.mesh.Connectivity`.
* contains_node (bool):
Contains the ``node`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched for potential removal.
* contains_edge (bool):
Contains the ``edge`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched for potential removal.
* contains_face (bool):
Contains the ``face`` element as part of the
:attr:`~iris.experimental.ugrid.metadata.ConnectivityMetadata.cf_role`
in the list of objects to be matched for potential removal.
Returns:
A list of :class:`~iris.experimental.ugrid.mesh.Connectivity`
instances removed from the :class:`Mesh` that matched the given
criteria.
"""
return self._connectivity_manager.remove(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
cf_role=cf_role,
contains_node=contains_node,
contains_edge=contains_edge,
contains_face=contains_face,
)
def remove_coords(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
include_nodes=None,
include_edges=None,
include_faces=None,
):
"""
Remove one or more :class:`~iris.coords.AuxCoord` from the :class:`Mesh`
that match the provided criteria.
Criteria can be either specific properties or other objects with
metadata to be matched.
Kwargs:
* item (str or object):
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is
compared against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a coordinate or metadata instance equal to that of
the desired coordinates e.g., :class:`~iris.coords.AuxCoord` or
:class:`~iris.common.metadata.CoordMetadata`.
* standard_name (str):
The CF standard name of the desired coordinate. If ``None``, does not
check for ``standard_name``.
* long_name (str):
An unconstrained description of the coordinate. If ``None``, does not
check for ``long_name``.
* var_name (str):
The NetCDF variable name of the desired coordinate. If ``None``, does
not check for ``var_name``.
* attributes (dict):
A dictionary of attributes desired on the coordinates. If ``None``,
does not check for ``attributes``.
* axis (str):
The desired coordinate axis, see :func:`~iris.util.guess_coord_axis`.
If ``None``, does not check for ``axis``. Accepts the values ``X``,
``Y``, ``Z`` and ``T`` (case-insensitive).
* include_nodes (bool):
Include all ``node`` coordinates in the list of objects to be matched
for potential removal.
* include_edges (bool):
Include all ``edge`` coordinates in the list of objects to be matched
for potential removal.
* include_faces (bool):
Include all ``face`` coordinates in the list of objects to be matched
for potential removal.
Returns:
A list of :class:`~iris.coords.AuxCoord` coordinates removed from
the :class:`Mesh` that matched the given criteria.
"""
# Filter out absent arguments - only expecting face coords sometimes,
# same will be true of volumes in future.
kwargs = {
"item": item,
"standard_name": standard_name,
"long_name": long_name,
"var_name": var_name,
"attributes": attributes,
"axis": axis,
"include_nodes": include_nodes,
"include_edges": include_edges,
"include_faces": include_faces,
}
kwargs = {k: v for k, v in kwargs.items() if v}
return self._coord_manager.remove(**kwargs)
def xml_element(self, doc):
"""
Create the :class:`xml.dom.minidom.Element` that describes this
:class:`Mesh`.
Args:
* doc (object):
The parent :class:`xml.dom.minidom.Document`.
Returns:
The :class:`xml.dom.minidom.Element` that will describe this
:class:`Mesh`, and the dictionary of attributes that require
to be added to this element.
"""
pass
# The MeshCoord will always have bounds, and perhaps points. However,
# MeshCoord.guess_points() may be a very useful part of its behaviour.
# After using MeshCoord.guess_points(), the user may wish to add the
# associated MeshCoord.points into the Mesh as face coordinates.
# def to_AuxCoord(self, location, axis):
# # factory method
# # return the lazy AuxCoord(...) for the given location and axis
#
# def to_AuxCoords(self, location):
# # factory method
# # return the lazy AuxCoord(...), AuxCoord(...)
def to_MeshCoord(self, location, axis):
"""
Generate a :class:`~iris.experimental.ugrid.mesh.MeshCoord` that
references the current :class:`Mesh`, passing through the
``location`` and ``axis`` arguments.
.. seealso::
:meth:`to_MeshCoords` for generating a series of mesh coords.
Args:
* location (str)
The ``location`` argument for
:class:`~iris.experimental.ugrid.mesh.MeshCoord` instantiation.
* axis (str)
The ``axis`` argument for
:class:`~iris.experimental.ugrid.mesh.MeshCoord` instantiation.
Returns:
A :class:`~iris.experimental.ugrid.mesh.MeshCoord` referencing the
current :class:`Mesh`.
"""
return MeshCoord(mesh=self, location=location, axis=axis)
def to_MeshCoords(self, location):
"""
Generate a tuple of
:class:`~iris.experimental.ugrid.mesh.MeshCoord`\\ s, each referencing
the current :class:`Mesh`, one for each :attr:`AXES` value, passing
through the ``location`` argument.
.. seealso::
:meth:`to_MeshCoord` for generating a single mesh coord.
Args:
* location (str)
The ``location`` argument for :class:`MeshCoord` instantiation.
Returns:
tuple of :class:`~iris.experimental.ugrid.mesh.MeshCoord`\\ s
referencing the current :class:`Mesh`. One for each value in
:attr:`AXES`, using the value for the ``axis`` argument.
"""
# factory method
result = [
self.to_MeshCoord(location=location, axis=ax) for ax in self.AXES
]
return tuple(result)
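# For example (commented sketch): build both X and Y MeshCoords for the
# "face" location and attach them to a hypothetical ``cube`` along its
# mesh dimension ``data_dim``:
#
#     face_x, face_y = mesh.to_MeshCoords(location="face")
#     cube.add_aux_coord(face_x, data_dim)
#     cube.add_aux_coord(face_y, data_dim)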
def dimension_names_reset(self, node=False, edge=False, face=False):
"""
Reset the name used for the NetCDF variable representing the ``node``,
``edge`` and/or ``face`` dimension to ``None``.
Kwargs:
* node (bool):
Reset the name of the ``node`` dimension if ``True``. Default
is ``False``.
* edge (bool):
Reset the name of the ``edge`` dimension if ``True``. Default
is ``False``.
* face (bool):
Reset the name of the ``face`` dimension if ``True``. Default
is ``False``.
"""
return self._set_dimension_names(node, edge, face, reset=True)
def dimension_names(self, node=None, edge=None, face=None):
"""
Assign the name to be used for the NetCDF variable representing
the ``node``, ``edge`` and ``face`` dimension.
The default value of ``None`` will not be assigned, so it cannot be
used to clear the associated ``node``, ``edge`` or ``face`` name.
Instead use :meth:`Mesh.dimension_names_reset`.
Kwargs:
* node (str):
The name to be used for the NetCDF variable representing the
``node`` dimension.
* edge (str):
The name to be used for the NetCDF variable representing the
``edge`` dimension.
* face (str):
The name to be used for the NetCDF variable representing the
``face`` dimension.
"""
return self._set_dimension_names(node, edge, face, reset=False)
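# For example (commented sketch): assign, then reset, the edge dimension
# name - resetting restores the "Mesh<N>d_edge" style default:
#
#     mesh.dimension_names(edge="my_edge_dim")
#     mesh.dimension_names_reset(edge=True)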
@property
def cf_role(self):
"""The UGRID ``cf_role`` attribute of the :class:`Mesh`."""
return "mesh_topology"
@property
def topology_dimension(self):
"""
The UGRID ``topology_dimension`` attribute represents the highest
dimensionality of all the geometric elements (node, edge, face) represented
within the :class:`Mesh`.
"""
return self._metadata_manager.topology_dimension
class _Mesh1DCoordinateManager:
"""
TBD: require clarity on coord_systems validation
TBD: require clarity on __eq__ support
TBD: rationalise self.coords() logic with other manager and Cube
"""
REQUIRED = (
"node_x",
"node_y",
)
OPTIONAL = (
"edge_x",
"edge_y",
)
def __init__(self, node_x, node_y, edge_x=None, edge_y=None):
# initialise all the coordinates
self.ALL = self.REQUIRED + self.OPTIONAL
self._members = {member: None for member in self.ALL}
# required coordinates
self.node_x = node_x
self.node_y = node_y
# optional coordinates
self.edge_x = edge_x
self.edge_y = edge_y
def __eq__(self, other):
# TBD: this is a minimalist implementation and needs to be revisited
return id(self) == id(other)
def __getstate__(self):
return self._members
def __iter__(self):
for item in self._members.items():
yield item
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def __repr__(self):
args = [
f"{member}={coord!r}"
for member, coord in self
if coord is not None
]
return f"{self.__class__.__name__}({', '.join(args)})"
def __setstate__(self, state):
self._members = state
def __str__(self):
args = [f"{member}" for member, coord in self if coord is not None]
return f"{self.__class__.__name__}({', '.join(args)})"
def _remove(self, **kwargs):
result = {}
members = self.filters(**kwargs)
for member in members.keys():
if member in self.REQUIRED:
dmsg = f"Ignoring request to remove required coordinate {member!r}"
logger.debug(dmsg, extra=dict(cls=self.__class__.__name__))
else:
result[member] = members[member]
setattr(self, member, None)
return result
def _setter(self, element, axis, coord, shape):
axis = axis.lower()
member = f"{element}_{axis}"
# enforce the UGRID minimum coordinate requirement
if element == "node" and coord is None:
emsg = (
f"{member!r} is a required coordinate, cannot set to 'None'."
)
raise ValueError(emsg)
if coord is not None:
if not isinstance(coord, AuxCoord):
emsg = f"{member!r} requires to be an 'AuxCoord', got {type(coord)}."
raise TypeError(emsg)
guess_axis = guess_coord_axis(coord)
if guess_axis and guess_axis.lower() != axis:
emsg = f"{member!r} requires a {axis}-axis like 'AuxCoord', got a {guess_axis.lower()}-axis like."
raise TypeError(emsg)
if coord.climatological:
emsg = f"{member!r} cannot be a climatological 'AuxCoord'."
raise TypeError(emsg)
if shape is not None and coord.shape != shape:
emsg = f"{member!r} requires to have shape {shape!r}, got {coord.shape!r}."
raise ValueError(emsg)
self._members[member] = coord
def _shape(self, element):
coord = getattr(self, f"{element}_x")
shape = coord.shape if coord is not None else None
if shape is None:
coord = getattr(self, f"{element}_y")
if coord is not None:
shape = coord.shape
return shape
@property
def _edge_shape(self):
return self._shape(element="edge")
@property
def _node_shape(self):
return self._shape(element="node")
@property
def all_members(self):
return Mesh1DCoords(**self._members)
@property
def edge_coords(self):
return MeshEdgeCoords(edge_x=self.edge_x, edge_y=self.edge_y)
@property
def edge_x(self):
return self._members["edge_x"]
@edge_x.setter
def edge_x(self, coord):
self._setter(
element="edge", axis="x", coord=coord, shape=self._edge_shape
)
@property
def edge_y(self):
return self._members["edge_y"]
@edge_y.setter
def edge_y(self, coord):
self._setter(
element="edge", axis="y", coord=coord, shape=self._edge_shape
)
@property
def node_coords(self):
return MeshNodeCoords(node_x=self.node_x, node_y=self.node_y)
@property
def node_x(self):
return self._members["node_x"]
@node_x.setter
def node_x(self, coord):
self._setter(
element="node", axis="x", coord=coord, shape=self._node_shape
)
@property
def node_y(self):
return self._members["node_y"]
@node_y.setter
def node_y(self, coord):
self._setter(
element="node", axis="y", coord=coord, shape=self._node_shape
)
def _add(self, coords):
member_x, member_y = coords._fields
# deal with the special case where both members are changing
if coords[0] is not None and coords[1] is not None:
cache_x = self._members[member_x]
cache_y = self._members[member_y]
self._members[member_x] = None
self._members[member_y] = None
try:
setattr(self, member_x, coords[0])
setattr(self, member_y, coords[1])
except (TypeError, ValueError):
# restore previous valid state
self._members[member_x] = cache_x
self._members[member_y] = cache_y
# now, re-raise the exception
raise
else:
# deal with the case where one or no member is changing
if coords[0] is not None:
setattr(self, member_x, coords[0])
if coords[1] is not None:
setattr(self, member_y, coords[1])
def add(self, node_x=None, node_y=None, edge_x=None, edge_y=None):
"""
Use ``self.remove(edge_x=True)`` to remove a coordinate; e.g., the
pattern ``self.add(edge_x=None)`` will not remove the ``edge_x`` coordinate.
"""
self._add(MeshNodeCoords(node_x, node_y))
self._add(MeshEdgeCoords(edge_x, edge_y))
def filter(self, **kwargs):
# TODO: rationalise commonality with MeshConnectivityManager.filter and Cube.coord.
result = self.filters(**kwargs)
if len(result) > 1:
names = ", ".join(
f"{member}={coord!r}" for member, coord in result.items()
)
emsg = (
f"Expected to find exactly 1 coordinate, but found {len(result)}. "
f"They were: {names}."
)
raise CoordinateNotFoundError(emsg)
if len(result) == 0:
item = kwargs["item"]
if item is not None:
if not isinstance(item, str):
item = item.name()
name = (
item
or kwargs["standard_name"]
or kwargs["long_name"]
or kwargs["var_name"]
or None
)
name = "" if name is None else f"{name!r} "
emsg = (
f"Expected to find exactly 1 {name}coordinate, but found none."
)
raise CoordinateNotFoundError(emsg)
return result
def filters(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
include_nodes=None,
include_edges=None,
include_faces=None,
):
# TBD: support coord_systems?
# Preserve original argument before modifying.
face_requested = include_faces
# Rationalise the tri-state behaviour.
args = [include_nodes, include_edges, include_faces]
state = not any(set(filter(lambda arg: arg is not None, args)))
include_nodes, include_edges, include_faces = map(
lambda arg: arg if arg is not None else state, args
)
def populated_coords(coords_tuple):
return list(filter(None, list(coords_tuple)))
members = []
if include_nodes:
members += populated_coords(self.node_coords)
if include_edges:
members += populated_coords(self.edge_coords)
if hasattr(self, "face_coords"):
if include_faces:
members += populated_coords(self.face_coords)
elif face_requested:
dmsg = "Ignoring request to filter non-existent 'face_coords'"
logger.debug(dmsg, extra=dict(cls=self.__class__.__name__))
result = metadata_filter(
members,
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
axis=axis,
)
# Use the results to filter the _members dict for returning.
result_ids = [id(r) for r in result]
result_dict = {
k: v for k, v in self._members.items() if id(v) in result_ids
}
return result_dict
def remove(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
include_nodes=None,
include_edges=None,
):
return self._remove(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
axis=axis,
include_nodes=include_nodes,
include_edges=include_edges,
)
class _Mesh2DCoordinateManager(_Mesh1DCoordinateManager):
OPTIONAL = (
"edge_x",
"edge_y",
"face_x",
"face_y",
)
def __init__(
self,
node_x,
node_y,
edge_x=None,
edge_y=None,
face_x=None,
face_y=None,
):
super().__init__(node_x, node_y, edge_x=edge_x, edge_y=edge_y)
# optional coordinates
self.face_x = face_x
self.face_y = face_y
@property
def _face_shape(self):
return self._shape(element="face")
@property
def all_members(self):
return Mesh2DCoords(**self._members)
@property
def face_coords(self):
return MeshFaceCoords(face_x=self.face_x, face_y=self.face_y)
@property
def face_x(self):
return self._members["face_x"]
@face_x.setter
def face_x(self, coord):
self._setter(
element="face", axis="x", coord=coord, shape=self._face_shape
)
@property
def face_y(self):
return self._members["face_y"]
@face_y.setter
def face_y(self, coord):
self._setter(
element="face", axis="y", coord=coord, shape=self._face_shape
)
def add(
self,
node_x=None,
node_y=None,
edge_x=None,
edge_y=None,
face_x=None,
face_y=None,
):
super().add(node_x=node_x, node_y=node_y, edge_x=edge_x, edge_y=edge_y)
self._add(MeshFaceCoords(face_x, face_y))
def remove(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
include_nodes=None,
include_edges=None,
include_faces=None,
):
return self._remove(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
axis=axis,
include_nodes=include_nodes,
include_edges=include_edges,
include_faces=include_faces,
)
class _MeshConnectivityManagerBase(ABC):
# Override these in subclasses.
REQUIRED: tuple = NotImplemented
OPTIONAL: tuple = NotImplemented
def __init__(self, *connectivities):
cf_roles = [c.cf_role for c in connectivities]
for requisite in self.REQUIRED:
if requisite not in cf_roles:
message = f"{type(self).__name__} requires a {requisite} Connectivity."
raise ValueError(message)
self.ALL = self.REQUIRED + self.OPTIONAL
self._members = {member: None for member in self.ALL}
self.add(*connectivities)
def __eq__(self, other):
# TBD: this is a minimalist implementation and needs to be revisited
return id(self) == id(other)
def __getstate__(self):
return self._members
def __iter__(self):
for item in self._members.items():
yield item
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def __repr__(self):
args = [
f"{member}={connectivity!r}"
for member, connectivity in self
if connectivity is not None
]
return f"{self.__class__.__name__}({', '.join(args)})"
def __setstate__(self, state):
self._members = state
def __str__(self):
args = [
f"{member}"
for member, connectivity in self
if connectivity is not None
]
return f"{self.__class__.__name__}({', '.join(args)})"
@property
@abstractmethod
def all_members(self):
return NotImplemented
def add(self, *connectivities):
# Since Connectivity classes include their cf_role, no setters will be
# provided, just a means to add one or more connectivities to the
# manager.
# No warning is raised for duplicate cf_roles - user is trusted to
# validate their outputs.
add_dict = {}
for connectivity in connectivities:
if not isinstance(connectivity, Connectivity):
message = f"Expected Connectivity, got: {type(connectivity)} ."
raise TypeError(message)
cf_role = connectivity.cf_role
if cf_role not in self.ALL:
message = (
f"Not adding connectivity ({cf_role}: "
f"{connectivity!r}) - cf_role must be one of: {self.ALL} ."
)
logger.debug(message, extra=dict(cls=self.__class__.__name__))
else:
add_dict[cf_role] = connectivity
# Validate shapes.
proposed_members = {**self._members, **add_dict}
elements = set(
[c.location for c in proposed_members.values() if c is not None]
)
for element in elements:
counts = [
len(c.indices_by_location(c.lazy_indices()))
for c in proposed_members.values()
if c is not None and c.location == element
]
            # Check if list values are identical.
if not counts.count(counts[0]) == len(counts):
message = (
f"Invalid Connectivities provided - inconsistent "
f"{element} counts."
)
raise ValueError(message)
self._members = proposed_members
def filter(self, **kwargs):
# TODO: rationalise commonality with MeshCoordManager.filter and Cube.coord.
result = self.filters(**kwargs)
if len(result) > 1:
names = ", ".join(
f"{member}={connectivity!r}"
for member, connectivity in result.items()
)
message = (
f"Expected to find exactly 1 connectivity, but found "
f"{len(result)}. They were: {names}."
)
raise ConnectivityNotFoundError(message)
elif len(result) == 0:
item = kwargs["item"]
_name = item
if item is not None:
if not isinstance(item, str):
_name = item.name()
bad_name = (
_name or kwargs["standard_name"] or kwargs["long_name"] or ""
)
message = (
f"Expected to find exactly 1 {bad_name} connectivity, "
f"but found none."
)
raise ConnectivityNotFoundError(message)
return result
def filters(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
cf_role=None,
contains_node=None,
contains_edge=None,
contains_face=None,
):
members = [c for c in self._members.values() if c is not None]
if cf_role is not None:
members = [
instance for instance in members if instance.cf_role == cf_role
]
def element_filter(instances, loc_arg, loc_name):
if loc_arg is False:
filtered = [
instance
for instance in instances
if loc_name
not in (
instance.location,
instance.connected,
)
]
elif loc_arg is None:
filtered = instances
else:
# Interpret any other value as =True.
filtered = [
instance
for instance in instances
if loc_name in (instance.location, instance.connected)
]
return filtered
for arg, loc in (
(contains_node, "node"),
(contains_edge, "edge"),
(contains_face, "face"),
):
members = element_filter(members, arg, loc)
# No need to actually modify filtering behaviour - already won't return
# any face cf-roles if none are present.
supports_faces = any(["face" in role for role in self.ALL])
if contains_face and not supports_faces:
message = (
"Ignoring request to filter for non-existent 'face' cf-roles."
)
logger.debug(message, extra=dict(cls=self.__class__.__name__))
result = metadata_filter(
members,
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
)
# Use the results to filter the _members dict for returning.
result_ids = [id(r) for r in result]
result_dict = {
k: v for k, v in self._members.items() if id(v) in result_ids
}
return result_dict
def remove(
self,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
cf_role=None,
contains_node=None,
contains_edge=None,
contains_face=None,
):
removal_dict = self.filters(
item=item,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
attributes=attributes,
cf_role=cf_role,
contains_node=contains_node,
contains_edge=contains_edge,
contains_face=contains_face,
)
for cf_role in self.REQUIRED:
excluded = removal_dict.pop(cf_role, None)
if excluded:
message = (
f"Ignoring request to remove required connectivity "
f"({cf_role}: {excluded!r})"
)
logger.debug(message, extra=dict(cls=self.__class__.__name__))
for cf_role in removal_dict.keys():
self._members[cf_role] = None
return removal_dict
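# A self-contained sketch (not iris code) of the tri-state contains_*
# semantics implemented by element_filter() inside filters() above:
# None means "do not filter", False means "must not involve the element",
# and any other value is treated as True, "must involve the element".
def _tri_state_filter(instances, loc_arg, loc_name):
    def involves(instance):
        return loc_name in (instance["location"], instance["connected"])
    if loc_arg is None:
        return list(instances)
    if loc_arg is False:
        return [c for c in instances if not involves(c)]
    return [c for c in instances if involves(c)]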
class _Mesh1DConnectivityManager(_MeshConnectivityManagerBase):
REQUIRED = ("edge_node_connectivity",)
OPTIONAL = ()
@property
def all_members(self):
return Mesh1DConnectivities(edge_node=self.edge_node)
@property
def edge_node(self):
return self._members["edge_node_connectivity"]
class _Mesh2DConnectivityManager(_MeshConnectivityManagerBase):
REQUIRED = ("face_node_connectivity",)
OPTIONAL = (
"edge_node_connectivity",
"face_edge_connectivity",
"face_face_connectivity",
"edge_face_connectivity",
"boundary_node_connectivity",
)
@property
def all_members(self):
return Mesh2DConnectivities(
face_node=self.face_node,
edge_node=self.edge_node,
face_edge=self.face_edge,
face_face=self.face_face,
edge_face=self.edge_face,
boundary_node=self.boundary_node,
)
@property
def boundary_node(self):
return self._members["boundary_node_connectivity"]
@property
def edge_face(self):
return self._members["edge_face_connectivity"]
@property
def edge_node(self):
return self._members["edge_node_connectivity"]
@property
def face_edge(self):
return self._members["face_edge_connectivity"]
@property
def face_face(self):
return self._members["face_face_connectivity"]
@property
def face_node(self):
return self._members["face_node_connectivity"]
class MeshCoord(AuxCoord):
"""
Geographic coordinate values of data on an unstructured mesh.
A MeshCoord references a `~iris.experimental.ugrid.mesh.Mesh`.
When contained in a `~iris.cube.Cube` it connects the cube to the Mesh.
It records (a) which 1-D cube dimension represents the unstructured mesh,
and (b) which mesh 'location' the cube data is mapped to -- i.e. is it
data on 'face's, 'edge's or 'node's.
A MeshCoord also specifies its 'axis' : 'x' or 'y'. Its values are then,
accordingly, longitudes or latitudes. The values are taken from the
appropriate coordinates and connectivities in the Mesh, determined by its
'location' and 'axis'.
Any cube with data on a mesh will have a MeshCoord for each axis,
i.e. an 'X' and a 'Y'.
The points and bounds contain coordinate values for the mesh elements,
which depends on location.
For 'node', the ``.points`` contains node locations.
For 'edge', the ``.bounds`` contains edge endpoints, and the ``.points`` contain
edge locations (typically centres), if the Mesh contains them (optional).
For 'face', the ``.bounds`` contain the face corners, and the ``.points`` contain the
face locations (typically centres), if the Mesh contains them (optional).
.. note::
As described above, it is possible for a MeshCoord to have bounds but
no points. This is not possible for a regular
:class:`~iris.coords.AuxCoord` or :class:`~iris.coords.DimCoord`.
.. note::
A MeshCoord can not yet actually be created with bounds but no points.
This is intended in future, but for now it raises an error.
"""
def __init__(
self,
mesh,
location,
axis,
):
# Setup the metadata.
self._metadata_manager = metadata_manager_factory(MeshCoordMetadata)
# Validate and record the class-specific constructor args.
if not isinstance(mesh, Mesh):
msg = (
"'mesh' must be an "
f"{Mesh.__module__}.{Mesh.__name__}, "
f"got {mesh}."
)
raise TypeError(msg)
# Handled as a readonly ".mesh" property.
# NOTE: currently *not* included in metadata. In future it might be.
self._mesh = mesh
if location not in Mesh.ELEMENTS:
msg = (
f"'location' of {location} is not a valid Mesh location', "
f"must be one of {Mesh.ELEMENTS}."
)
raise ValueError(msg)
# Held in metadata, readable as self.location, but cannot set it.
self._metadata_manager.location = location
if axis not in Mesh.AXES:
# The valid axes are defined by the Mesh class.
msg = (
f"'axis' of {axis} is not a valid Mesh axis', "
f"must be one of {Mesh.AXES}."
)
raise ValueError(msg)
# Held in metadata, readable as self.axis, but cannot set it.
self._metadata_manager.axis = axis
points, bounds = self._construct_access_arrays()
if points is None:
# TODO: we intend to support this in future, but it will require
# extra work to refactor the parent classes.
msg = "Cannot yet create a MeshCoord without points."
raise ValueError(msg)
# Get the 'coord identity' metadata from the relevant node-coordinate.
node_coord = self.mesh.coord(include_nodes=True, axis=self.axis)
# Call parent constructor to handle the common constructor args.
super().__init__(
points,
bounds=bounds,
standard_name=node_coord.standard_name,
long_name=node_coord.long_name,
var_name=None, # We *don't* "represent" the underlying node var
units=node_coord.units,
attributes=node_coord.attributes,
)
# Define accessors for MeshCoord-specific properties mesh/location/axis.
# These are all read-only.
@property
def mesh(self):
return self._mesh
@property
def location(self):
return self._metadata_manager.location
@property
def axis(self):
return self._metadata_manager.axis
# Provide overrides to mimic the Coord-specific properties that are not
# supported by MeshCoord, i.e. "coord_system" and "climatological".
# These mimic the Coord properties, but always return fixed 'null' values.
# They can be set, to the 'null' value only, for the inherited init code.
@property
def coord_system(self):
"""The coordinate-system of a MeshCoord is always 'None'."""
return None
@coord_system.setter
def coord_system(self, value):
if value is not None:
msg = "Cannot set the coordinate-system of a MeshCoord."
raise ValueError(msg)
@property
def climatological(self):
"""The 'climatological' of a MeshCoord is always 'False'."""
return False
@climatological.setter
def climatological(self, value):
if value:
msg = "Cannot set 'climatological' on a MeshCoord."
raise ValueError(msg)
def __getitem__(self, keys):
# Disallow any sub-indexing, permitting *only* "self[:,]".
# We *don't* intend here to support indexing as such : the exception is
# just sufficient to enable cube slicing, when it does not affect the
# mesh dimension. This works because Cube.__getitem__ passes us keys
# "normalised" with iris.util._build_full_slice_given_keys.
if keys != (slice(None),):
msg = "Cannot index a MeshCoord."
raise ValueError(msg)
# Translate "self[:,]" as "self.copy()".
return self.copy()
def copy(self, points=None, bounds=None):
"""
Make a copy of the MeshCoord.
Kwargs:
* points, bounds (array):
Provided solely for signature compatibility with other types of
:class:`~iris.coords.Coord`.
In this case, if either is not 'None', an error is raised.
"""
# Override Coord.copy, so that we can ensure it does not duplicate the
# Mesh object (via deepcopy).
# This avoids copying Meshes.
# FOR NOW: also disallow changing points/bounds at all.
if points is not None or bounds is not None:
msg = "Cannot change the content of a MeshCoord."
raise ValueError(msg)
# Make a new MeshCoord with the same args : The Mesh is the *same*
# as the original (not a copy).
new_coord = MeshCoord(
mesh=self.mesh, location=self.location, axis=self.axis
)
return new_coord
def __deepcopy__(self, memo):
"""
Make this equivalent to "shallow" copy, returning a new MeshCoord based
on the same Mesh.
Required to prevent cube copying from copying the Mesh, which would
prevent "cube.copy() == cube" : see notes for :meth:`copy`.
"""
return self.copy()
# Override _DimensionalMetadata.__eq__, to add 'mesh' comparison into the
# default implementation (which compares metadata, points and bounds).
# This is needed because 'mesh' is not included in our metadata.
def __eq__(self, other):
eq = NotImplemented
if isinstance(other, MeshCoord):
# *Don't* use the parent (_DimensionalMetadata) __eq__, as that
# will try to compare points and bounds arrays.
# Just compare the mesh, and the (other) metadata.
eq = self.mesh == other.mesh # N.B. 'mesh' not in metadata.
if eq is not NotImplemented and eq:
# Compare rest of metadata, but not points/bounds.
eq = self.metadata == other.metadata
return eq
# Exactly as for Coord.__hash__ : See there for why.
def __hash__(self):
return hash(id(self))
def summary(self, *args, **kwargs):
# We need to specialise _DimensionalMetadata.summary, so that we always
# print the mesh+location of a MeshCoord.
if len(args) > 0:
shorten = args[0]
else:
shorten = kwargs.get("shorten", False)
# Get the default-form result.
if shorten:
# NOTE: we simply aren't interested in the values for the repr,
# so fix linewidth to suppress them
kwargs["linewidth"] = 1
# Plug private key, to get back the section structure info
section_indices = {}
kwargs["_section_indices"] = section_indices
result = super().summary(*args, **kwargs)
# Modify the generic 'default-form' result to produce what we want.
if shorten:
# Single-line form : insert mesh+location before the array part
# Construct a text detailing the mesh + location
mesh_string = self.mesh.name()
if mesh_string == "unknown":
# If no name, replace with the one-line summary
mesh_string = self.mesh.summary(shorten=True)
extra_str = f"mesh({mesh_string}) location({self.location}) "
# find where in the line the data-array text begins
i_line, i_array = section_indices["data"]
assert i_line == 0
# insert the extra text there
result = result[:i_array] + extra_str + result[i_array:]
# NOTE: this invalidates the original width calculation and may
# easily extend the result beyond the intended maximum linewidth.
# We do treat that as an advisory control over array printing, not
# an absolute contract, so just ignore the problem for now.
else:
# Multiline form
# find where the "location: ... " section is
i_location, i_namestart = section_indices["location"]
lines = result.split("\n")
location_line = lines[i_location]
# copy the indent spacing
indent = location_line[:i_namestart]
# use that to construct a suitable 'mesh' line
mesh_string = self.mesh.summary(shorten=True)
mesh_line = f"{indent}mesh: {mesh_string}"
# Move the 'location' line, putting it and the 'mesh' line right at
# the top, immediately after the header line.
del lines[i_location]
lines[1:1] = [mesh_line, location_line]
# Re-join lines to give the result
result = "\n".join(lines)
return result
def _construct_access_arrays(self):
"""
Build lazy points and bounds arrays, providing dynamic access via the
Mesh, according to the location and axis.
Returns:
* points, bounds (array or None):
lazy arrays which calculate the correct points and bounds from the
Mesh data, based on the location and axis.
The Mesh coordinates accessed are not identified on construction,
but discovered from the Mesh at the time of calculation, so that
the result is always based on current content in the Mesh.
"""
mesh, location, axis = self.mesh, self.location, self.axis
node_coord = self.mesh.coord(include_nodes=True, axis=axis)
if location == "node":
points_coord = node_coord
bounds_connectivity = None
elif location == "edge":
points_coord = self.mesh.coord(include_edges=True, axis=axis)
bounds_connectivity = mesh.edge_node_connectivity
elif location == "face":
points_coord = self.mesh.coord(include_faces=True, axis=axis)
bounds_connectivity = mesh.face_node_connectivity
# The points output is the points of the relevant element-type coord.
points = points_coord.core_points()
if bounds_connectivity is None:
bounds = None
else:
# Bounds are calculated from a connectivity and the node points.
# Data can be real or lazy, so operations must work in Dask, too.
indices = bounds_connectivity.core_indices()
# Normalise indices dimension order to [faces/edges, bounds]
indices = bounds_connectivity.indices_by_location(indices)
# Normalise the start index
indices = indices - bounds_connectivity.start_index
node_points = node_coord.core_points()
n_nodes = node_points.shape[0]
# Choose real/lazy array library, to suit array types.
lazy = _lazy.is_lazy_data(indices) or _lazy.is_lazy_data(
node_points
)
al = da if lazy else np
# NOTE: Dask cannot index with a multidimensional array, so we
# must flatten it and restore the shape later.
flat_inds = indices.flatten()
# NOTE: the connectivity array can have masked points, but we can't
# effectively index with those. So use a non-masked index array
# with "safe" index values, and post-mask the results.
flat_inds_nomask = al.ma.filled(flat_inds, -1)
# Note: *also* mask any places where the index is out of range.
missing_inds = (flat_inds_nomask < 0) | (
flat_inds_nomask >= n_nodes
)
flat_inds_safe = al.where(missing_inds, 0, flat_inds_nomask)
# Here's the core indexing operation.
# The comma applies all inds-array values to the *first* dimension.
bounds = node_points[
flat_inds_safe,
]
# Fix 'missing' locations, and restore the proper shape.
bounds = al.ma.masked_array(bounds, missing_inds)
bounds = bounds.reshape(indices.shape)
return points, bounds
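# A self-contained numpy sketch (not part of iris) of the bounds-building
# pattern used in _construct_access_arrays above: flatten the possibly
# masked connectivity indices, substitute a safe index for invalid entries,
# gather the node points, then re-apply the mask and restore the shape.
import numpy as np
node_points = np.array([0.0, 1.0, 2.0, 3.0])  # one x value per node
indices = np.ma.masked_array(                 # shape: (faces, vertices)
    [[0, 1, 2], [1, 2, 3]],
    mask=[[False, False, False], [False, False, True]],
)
flat = np.ma.filled(indices.flatten(), -1)
missing = (flat < 0) | (flat >= node_points.shape[0])
safe = np.where(missing, 0, flat)
bounds = np.ma.masked_array(node_points[safe], missing).reshape(indices.shape)
# bounds -> [[0.0, 1.0, 2.0], [1.0, 2.0, --]]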
|
SciTools/iris
|
lib/iris/experimental/ugrid/mesh.py
|
Python
|
lgpl-3.0
| 108,372
|
[
"NetCDF"
] |
8dc532465a7ffc88e466d2121c8e53cc3e11b5cf984e3a77b26dbd26234d09a5
|
#!/usr/bin/python
# Python parser module for lattice preparation from bowtie output (23/6/2012)
# version 3, 16-4-2014
# Usage: mirlattice_preparator.py <bowtie_out> <output file> <norm_factor> <bowtie index> <option tag>
import sys, subprocess
from collections import defaultdict
from smRtools import *
from numpy import mean, median, std
class LatticeRNA (SmRNAwindow):
'''overloading of the smRNAwindow class for objects with only forward reads (typically mRNA matched by reads)'''
def readmap (self):
readmap = {}
for offset in self.readDict.keys():
readmap[offset] = len(self.readDict[offset])
return readmap
def normalizedreadmap (self):
MaxOffset = self.size
readmap = {}
thevalues=[]
for offset in self.readDict.keys():
thevalues.append(len(self.readDict[offset]))
    try: MaxValue = max(thevalues)
    except ValueError: MaxValue = 0
for offset in self.readDict:
readmap[offset/float(MaxOffset)] = len(self.readDict[offset])/float(MaxValue)
return readmap
def meansizeatoffset (self, estimator_function, offset):
return estimator_function(self.readDict[offset])
def meansizemap (self, estimator_function):
meansizedic = {}
for offset in self.readDict.keys():
meansizedic[offset] = estimator_function(self.readDict[offset])
return meansizedic
def medianesizemap (self):
medianesizedic = {}
for offset in self.readDict.keys():
medianesizedic[offset] = median(self.readDict[offset])
return medianesizedic
def density (self):
'''method to output the read coverage by position in the mir'''
map = [0 for i in range (len(self.sequence))]
for offset, size in self.dicmap:
for i in range (offset, offset+size):
map[i] += self.dicmap[(offset,size)]
return map
def normalized_density (self):
map = self.density ()
maximum = float (max (map) ) or 1
length = float (len (map) ) or 1
Total_NoR = self.mircount()
output = ["mir\tcoordinate\tdensity\tNoR"]
for i, D in enumerate (map):
output.append("%s\t%s\t%s\t%s" % (self.name, (i+1)/length, D/maximum, Total_NoR))
return "\n".join(output)
if sys.argv[-1] == "--extract_index":
ItemDic = get_fasta (sys.argv[-2])
else:
ItemDic = get_fasta_from_history (sys.argv[-2])
ObjectDic = {}
for item in ItemDic:
ObjectDic[item] = LatticeRNA(item, ItemDic[item])
F = open (sys.argv[1], "r")
for line in F:
fields = line.split()
name = fields[1]
offset= int(fields[2])
sequence= fields[3]
ObjectDic[name].addread("+", offset, len(sequence))
F.close()
norm_factor = sys.argv[3]
norm_factor = float(norm_factor)
F = open (sys.argv[2], "w")
print >> F, "gene\toffset\tcount\tnormOffset\tnormCount\tmedianesize\ttotal_count"
for item in sorted(ObjectDic):
for offset, normoffset in zip (sorted(ObjectDic[item].readDict), sorted(ObjectDic[item].normalizedreadmap()) ):
print >> F, "%s\t%s\t%s\t%s\t%s\t%s\t%s" % (item, offset, len(ObjectDic[item].readDict[offset])*norm_factor, normoffset, ObjectDic[item].normalizedreadmap()[normoffset], int(ObjectDic[item].meansizeatoffset(median, offset)), ObjectDic[item].forwardreadcount()*norm_factor )
F.close()
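# Example invocation (hypothetical file names), following the Usage line above:
#   python mirlattice_preparator.py bowtie_out.tab lattice.tsv 0.35 mirbase.fa --extract_index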
|
JuPeg/tools-artbio
|
unstable/local_tools/mirlattice_preparator.py
|
Python
|
mit
| 3,181
|
[
"Bowtie"
] |
9d758340a0e34bb6508bf76a6ddb79e8c5ede1ab6a72f32c39f86790c59fce3b
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from mock import patch
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm
from zerver.views import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.models import (
get_realm, get_prereg_user_by_email, get_user_profile_by_email,
PreregistrationUser, Realm, RealmAlias, Recipient,
    Referral, ScheduledJob, UserProfile, UserMessage,
    Stream, Subscription
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from six.moves import urllib
from six.moves import range
import six
from typing import Any, Dict, List, Text
import os
class PublicURLTest(ZulipTestCase):
"""
Account creation URLs are accessible even when not logged in. Authenticated
URLs redirect to a page.
"""
def fetch(self, method, urls, expected_status):
# type: (str, List[str], int) -> None
for url in urls:
# e.g. self.client_post(url) if method is "post"
response = getattr(self, method)(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
# type: () -> None
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
        get_urls = {200: ["/accounts/home/", "/accounts/login/",
"/en/accounts/home/", "/ru/accounts/home/",
"/en/accounts/login/", "/ru/accounts/login/",
"/help/"],
302: ["/", "/en/", "/ru/"],
401: ["/json/streams/Denmark/members",
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
"/json/messages",
"/api/v1/streams",
],
404: ["/help/nonexistent"],
}
# Add all files in 'templates/zerver/help' directory (except for 'main.html' and
# 'index.md') to `get_urls['200']` list.
for doc in os.listdir('./templates/zerver/help'):
if doc not in {'main.html', 'index.md', 'include'}:
get_urls[200].append('/help/' + os.path.splitext(doc)[0]) # Strip the extension.
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/messages",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/exists",
"/json/subscriptions/property",
"/json/fetch_api_key",
"/json/users/me/pointer",
"/json/users/me/subscriptions",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
put_urls = {401: ["/json/users/me/pointer"],
}
for status_code, url_set in six.iteritems(get_urls):
self.fetch("client_get", url_set, status_code)
for status_code, url_set in six.iteritems(post_urls):
self.fetch("client_post", url_set, status_code)
for status_code, url_set in six.iteritems(put_urls):
self.fetch("client_put", url_set, status_code)
def test_get_gcid_when_not_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(get_realm("zulip"), stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register("test@zulip.com", "test")
user_profile = get_user_profile_by_email("test@zulip.com")
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message("hamlet@zulip.com", streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = 'hamlet@zulip.com'
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password.', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/alice@example.com')
self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login("hamlet@zulip.com", password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("xxx@zulip.com", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
with queries_captured() as queries:
self.register("test@zulip.com", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_max_length(queries, 69)
user_profile = get_user_profile_by_email('test@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test@zulip.com", "test")
self.assert_in_response("has been deactivated", result)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('test@zulip.com')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return("hamlet@zulip.com")
self.assert_in_response("has been deactivated", result)
def test_logout(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "test@zulip.com"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test@zulip.com", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client_post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class InviteUserTest(ZulipTestCase):
def invite(self, users, streams):
# type: (str, List[Text]) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invite_users",
{"invitee_emails": users,
"stream": streams})
def check_sent_emails(self, correct_recipients):
# type: (List[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
def test_bulk_invite_users(self):
# type: () -> None
"""The bulk_invite_users code path is for the first user in a realm."""
self.login('hamlet@zulip.com')
invitees = ['alice@zulip.com', 'bob@zulip.com']
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client_post('/json/bulk_invite_users', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email])
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe_to_stream(user_profile.email, private_stream_name)
public_msg_id = self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message("hamlet@zulip.com", private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user("alice-test@zulip.com", "password")
invitee_profile = get_user_profile_by_email(invitee)
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login("hamlet@zulip.com")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users", {"invitee_emails": "foo@zulip.com"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users",
{"invitee_emails": "hamlet@zulip.com",
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="hamlet@zulip.com"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("hamlet@zulip.com")
existing = ["hamlet@zulip.com", "othello@zulip.com"]
new = ["foo-test@zulip.com", "bar-test@zulip.com"]
result = self.client_post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe_to_stream("hamlet@zulip.com", stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_refer_friend(self):
# type: () -> None
self.login("hamlet@zulip.com")
user = get_user_profile_by_email('hamlet@zulip.com')
user.invites_granted = 1
user.invites_used = 0
user.save()
invitee = "alice-test@zulip.com"
result = self.client_post('/json/refer_friend', dict(email=invitee))
self.assert_json_success(result)
# verify this works
Referral.objects.get(user_profile=user, email=invitee)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(user.invites_used, 1)
def test_invitation_reminder_email(self):
# type: () -> None
from django.core.mail import outbox
current_user_email = "hamlet@zulip.com"
self.login(current_user_email)
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
data = {"email": invitee, "referrer_email": current_user_email}
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=0),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
email_jobs_to_deliver = ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL,
scheduled_timestamp__lte=datetime.datetime.utcnow())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
self.assertTrue(send_email_job(job))
self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "email1@zulip.com"
self.email2 = "email2@zulip.com"
self.email3 = "email3@zulip.com"
    def test_if_emails_separated_by_commas_are_parsed_and_stripped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
    def test_if_emails_separated_by_newlines_are_parsed_and_stripped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
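# A minimal sketch (not Zulip's actual get_invitee_emails_set) of a parser
# satisfying the behaviours tested above: split on commas and newlines,
# strip whitespace, and unwrap "Name <address>" forms.
def parse_invitee_emails(raw):
    # type: (Text) -> set
    emails = set()
    for chunk in re.split(r'[,\n]', raw):
        chunk = chunk.strip()
        if not chunk:
            continue
        match = re.search(r'<(\S+?)>', chunk)
        emails.add(match.group(1) if match else chunk)
    return emails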
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self):
# type: () -> None
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm = get_realm('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.COMMUNITY)
self.assertEqual(realm.restricted_to_domain, False)
self.assertEqual(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_with_subdomain(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
password = "test"
email = "user1@test.com"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'mit': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
def test_user_default_language(self):
# type: () -> None
"""
Check if the default language of new user is the default language
of the realm.
"""
email = "newguy@zulip.com"
password = "newpassword"
realm = get_realm('zulip')
do_set_realm_default_language(realm, "de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password)
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, realm.default_language)
from django.core.mail import outbox
outbox.pop()
def test_unique_completely_open_domain(self):
# type: () -> None
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
realm = get_realm('mit')
do_deactivate_realm(realm)
realm.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_completely_open_domain_success(self):
# type: () -> None
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm('zulip')
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=realm)
self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@zulip.com'}, realm=realm)
self.assertIn("Please request an invite from", form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=None)
self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])
def test_registration_through_ldap(self):
# type: () -> None
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New User Name",
"newuser@zulip.com"],
result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
password = "test"
email = "sipbtest@mit.edu"
subdomain = "sipb"
realm_name = "MIT"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator")
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = 'hamlet@zulip.com'
user_2 = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
|
AZtheAsian/zulip
|
zerver/tests/test_signup.py
|
Python
|
apache-2.0
| 50,381
|
[
"VisIt"
] |
daa4495145663fb880f94591a5f3c183c8c44a3771c690b792ed6b8dd0dc0607
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Lee-Ping Wang
# Contributors: Robert McGibbon and Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import itertools
import numpy as np
from mdtraj.utils import cast_indices, in_units_of
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils.six import string_types
from mdtraj.utils.six.moves import xrange
__all__ = ['ArcTrajectoryFile', 'load_arc']
##############################################################################
# Classes
##############################################################################
class _EOF(IOError):
pass
@FormatRegistry.register_loader('.arc')
def load_arc(filename, stride=None, atom_indices=None, frame=None):
"""Load a TINKER .arc file from disk.
Parameters
----------
filename : str
String filename of TINKER .arc file.
stride : int, default=None
Only read every stride-th frame
atom_indices : array_like, optional
If not None, then read only a subset of the atoms' coordinates from the
file.
frame : int, optional
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object.
See Also
--------
mdtraj.ArcTrajectoryFile : Low level interface to TINKER .arc files
"""
from mdtraj.core.trajectory import _parse_topology
if not isinstance(filename, string_types):
raise TypeError('filename must be of type string for load_arc. '
'you supplied %s' % type(filename))
atom_indices = cast_indices(atom_indices)
with ArcTrajectoryFile(filename) as f:
if frame is not None:
f.seek(frame)
n_frames = 1
else:
n_frames = None
return f.read_as_traj(n_frames=n_frames, stride=stride,
atom_indices=atom_indices)
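# A minimal usage sketch (kept as comments so nothing runs on import;
# the filename 'frames.arc' is hypothetical):
#
#     traj = load_arc('frames.arc', stride=2)
#     print(traj.n_frames, traj.xyz.shape)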
@FormatRegistry.register_fileobject('.arc')
class ArcTrajectoryFile(object):
"""Interface for reading and writing to an TINKER archive files.
(Note that the TINKER .xyz format is identical to this.) This is
a file-like object, that both reading or writing depending on the
`mode` flag. It implements the context manager protocol, so you
can also use it with the python 'with' statement.
The conventional units in the arc file is angstrom. The format only
supports storing the cartesian coordinates and box lengths.
Attributes
----------
topology : Topology
A single-chain, single-residue topology generated from the atom and bond
information found in the TINKER archive/xyz file. It is only generated
from the first member of the archive
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r'}
The mode in which to open the file, only 'r' for read is supported.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
"""
distance_unit = 'angstroms'
def __init__(self, filename, mode='r', force_overwrite=True):
"""Open an TINKER.arc file for reading/writing.
"""
self._is_open = False
self._filename = filename
self._frame_index = 0
self._mode = mode
self.topology = None
if mode == 'w':
raise ValueError('Writing TINKER .arc files is not supported at this time')
# track which line we're on. this is not essential, but it's useful
# when reporting errors to the user to say what line it occurred on.
self._line_counter = 0
if mode == 'r':
# if n_atoms is None:
# raise ValueError('To open a mdcrd file in mode="r", you must '
# 'supply the number of atoms, "n_atoms"')
if not os.path.exists(filename):
raise IOError("The file '%s' doesn't exist" % filename)
self._fh = open(filename, 'r')
self._is_open = True
else:
raise ValueError('mode must be "r". '
'you supplied "%s"' % mode)
def seek(self, offset, whence=0):
"""Move to a new file position
Parameters
----------
offset : int
A number of frames.
whence : {0, 1, 2}
0: offset from start of file, offset should be >=0.
1: move relative to the current position, positive or negative
2: move relative to the end of file, offset should be <= 0.
Seeking beyond the end of a file is not supported
"""
raise NotImplementedError()
def tell(self):
"""Current file position
Returns
-------
offset : int
The current frame in the file.
"""
raise NotImplementedError()
def close(self):
"""Close the .arc file"""
if self._is_open:
self._fh.close()
self._is_open = False
def __del__(self):
self.close()
def __enter__(self):
"Support the context manager protocol"
return self
def __exit__(self, *exc_info):
"Support the context manager protocol"
self.close()
def __len__(self):
"Number of frames in the file"
raise NotImplementedError()
def read_as_traj(self, n_frames=None, stride=None, atom_indices=None):
"""Read a trajectory from a ARC file
Parameters
----------
n_frames : int, optional
If positive, then read only the next `n_frames` frames. Otherwise read all
of the frames in the file.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
If not None, then read only a subset of the atoms' coordinates from the
file. This may be slightly slower than the standard read because it requires
an extra copy, but will save memory.
See Also
--------
read : Returns the raw data from the file
"""
from mdtraj.core.trajectory import Trajectory
topology = self.topology
if atom_indices is not None:
topology = topology.subset(atom_indices)
initial = int(self._frame_index)
xyz, abc, ang = self.read(n_frames=n_frames, stride=stride, atom_indices=atom_indices)
if len(xyz) == 0:
return Trajectory(xyz=np.zeros((0, topology.n_atoms, 3)), topology=topology)
in_units_of(xyz, self.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(abc, self.distance_unit, Trajectory._distance_unit, inplace=True)
if stride is None:
stride = 1
time = (stride*np.arange(len(xyz))) + initial
return Trajectory(xyz=xyz, topology=topology, time=time,
unitcell_lengths=abc,
unitcell_angles=ang)
def read(self, n_frames=None, stride=None, atom_indices=None):
"""Read data from a TINKER .arc file.
Note that only the
Cartesian coordinates are read in. The .arc file also
contains TINKER-specific numeric atom types and some bonding
information, which we do not read in.
Parameters
----------
n_frames : int, None
The number of frames you would like to read from the file.
If None, all of the remaining frames will be loaded.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
If not None, then read only a subset of the atoms' coordinates
from the file.
Returns
-------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3), dtype=np.float32
The cartesian coordinates, in angstroms
"""
if not self._mode == 'r':
raise ValueError('read() is only available when file is opened '
'in mode="r"')
if n_frames is None:
frame_counter = itertools.count()
else:
frame_counter = xrange(n_frames)
if stride is None:
stride = 1
coords = []
lengths = []
angles = []
for i in frame_counter:
try:
coord, length, angle = self._read()
if atom_indices is not None:
coord = coord[atom_indices, :]
except _EOF:
break
coords.append(coord)
lengths.append(length)
angles.append(angle)
for j in range(stride - 1):
# throw away these frames
self._read()
coords = np.array(coords)
if any(l is None for l in lengths):
lengths = angles = None
else:
lengths = np.array(lengths)
angles = np.array(angles)
return coords, lengths, angles
def _read(self):
"Read a single frame"
from mdtraj.core.topology import Topology
from mdtraj.core.element import Element, virtual
# Read in the number of atoms.
line = self._fh.readline()
if line == '':
raise _EOF()
self._n_atoms = int(line.split()[0])
self._line_counter += 1
coords = np.empty((self._n_atoms, 3), dtype=np.float32)
bond_partners = [[] for i in xrange(self._n_atoms)]
atom_names = ['' for i in xrange(self._n_atoms)]
line = self._fh.readline()
s = line.split()
self._line_counter += 1
# See if we have box info on this line or not
cell_lengths = cell_angles = None
if len(s) == 6:
try:
cell_lengths = np.asarray(
[float(s[0]), float(s[1]), float(s[2])]
)
cell_angles = np.asarray(
[float(s[3]), float(s[4]), float(s[5])]
)
line = self._fh.readline()
s = line.split()
self._line_counter += 1
except ValueError:
pass
i = 0
while i < self._n_atoms - 1:
atom_names[i] = s[1]
bond_partners[i] = [int(x) for x in s[6:]]
coords[i,:] = [float(s[pos]) for pos in [2, 3, 4]]
i += 1
line = self._fh.readline()
s = line.split()
self._line_counter += 1
# Now do the last atom
atom_names[i] = s[1]
bond_partners[i] = [int(x) for x in s[6:]]
coords[i,:] = [float(s[pos]) for pos in [2, 3, 4]]
# Now see if we have to build a topology
if self.topology is None:
self.topology = top = Topology()
chain = top.add_chain() # only 1 chain
res = top.add_residue('RES', chain, 1) # only 1 residue
for at in atom_names:
# First get the element. Try for common 2-letter elements, then
# use the first letter only (default to None if I can't find it)
if at[:2].upper() in ('NA', 'CL', 'MG'):
elem = Element.getBySymbol(at[:2])
else:
try:
elem = Element.getBySymbol(at[0])
except KeyError:
elem = virtual
top.add_atom(at, elem, res)
# Now add the bonds
atoms = list(top.atoms)
for i, bonds in enumerate(bond_partners):
me = atoms[i]
for b in bonds:
b -= 1
if b < i: continue
it = atoms[b]
top.add_bond(me, it)
self._frame_index += 1
return coords, cell_lengths, cell_angles
def write(self, xyz):
""" The ArcTrajectoryFile does not have a write method,
because TINKER .arc files have special numerical atom types
which are not shared by any other trajectory file format.
Parameters
----------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms to write.
"""
raise RuntimeError('write() is not available for .arc files')
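# Low-level usage sketch (comments only; 'frames.arc' is a hypothetical path).
# The context manager protocol implemented above handles cleanup:
#
#     with ArcTrajectoryFile('frames.arc') as f:
#         xyz, lengths, angles = f.read(n_frames=10)
#     # xyz is an (n_frames, n_atoms, 3) float32 array in angstroms;
#     # lengths and angles are None when the file carries no box information.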
|
leeping/mdtraj
|
mdtraj/formats/arc.py
|
Python
|
lgpl-2.1
| 13,725
|
[
"MDTraj",
"TINKER"
] |
aae4c6e78c03193f60a5553dee93103d95c48abfe61b97cbc0e882995e84c0fd
|
# Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import anyconfig
from molecule import interpolation
from molecule import logger
from molecule import platforms
from molecule import scenario
from molecule import state
from molecule import util
from molecule.dependency import ansible_galaxy
from molecule.dependency import gilt
from molecule.driver import delegated
from molecule.driver import docker
from molecule.driver import ec2
from molecule.driver import gce
from molecule.driver import lxc
from molecule.driver import lxd
from molecule.driver import kvm
from molecule.driver import openstack
from molecule.driver import vagrant
from molecule.lint import yamllint
from molecule.model import schema
from molecule.provisioner import ansible
from molecule.verifier import goss
from molecule.verifier import testinfra
LOG = logger.get_logger(__name__)
MOLECULE_DIRECTORY = 'molecule'
MOLECULE_FILE = 'molecule.yml'
MERGE_STRATEGY = anyconfig.MS_DICTS
class Config(object):
"""
Molecule searches the current directory for `molecule.yml` files by
globbing `molecule/*/molecule.yml`. The files are instantiated into
a list of Molecule :class:`.Config` objects, and each Molecule subcommand
operates on this list.
The directory in which the `molecule.yml` resides is the Scenario's
directory. Molecule performs most functions within this directory.
The :class:`.Config` object has instantiated Dependency_, Driver_,
:ref:`root_lint`, Platforms_, Provisioner_, Verifier_,
:ref:`root_scenario`, and State_ references.
"""
def __init__(self,
molecule_file,
args={},
command_args={},
ansible_args=()):
"""
Initialize a new config class and return None.
:param molecule_file: A string containing the path to the Molecule file
to be parsed.
:param args: An optional dict of options, arguments and commands from
the CLI.
:param command_args: An optional dict of options passed to the
subcommand from the CLI.
:param ansible_args: An optional tuple of arguments provided to the
`ansible-playbook` command.
:returns: None
"""
self.molecule_file = molecule_file
self.args = args
self.command_args = command_args
self.ansible_args = ansible_args
self.config = self._combine()
@property
def debug(self):
return self.args.get('debug', False)
@property
def subcommand(self):
return self.command_args['subcommand']
@property
def ephemeral_directory(self):
return os.path.join(self.scenario.directory, '.molecule')
@property
def project_directory(self):
return os.getcwd()
@property
def molecule_directory(self):
return molecule_directory(self.project_directory)
@property
def dependency(self):
dependency_name = self.config['dependency']['name']
if dependency_name == 'galaxy':
return ansible_galaxy.AnsibleGalaxy(self)
elif dependency_name == 'gilt':
return gilt.Gilt(self)
else:
util.exit_with_invalid_section('dependency', dependency_name)
@property
def driver(self):
driver_name = self._get_driver_name()
driver = None
if driver_name == 'delegated':
driver = delegated.Delegated(self)
elif driver_name == 'docker':
driver = docker.Docker(self)
elif driver_name == 'ec2':
driver = ec2.Ec2(self)
elif driver_name == 'gce':
driver = gce.Gce(self)
elif driver_name == 'lxc':
driver = lxc.Lxc(self)
elif driver_name == 'lxd':
driver = lxd.Lxd(self)
elif driver_name == 'kvm':
driver = kvm.kvm(self)
elif driver_name == 'openstack':
driver = openstack.Openstack(self)
elif driver_name == 'vagrant':
driver = vagrant.Vagrant(self)
else:
util.exit_with_invalid_section('driver', driver_name)
driver.name = driver_name
return driver
@property
def drivers(self):
return molecule_drivers()
@property
def env(self):
return {
'MOLECULE_DEBUG': str(self.debug),
'MOLECULE_FILE': self.molecule_file,
'MOLECULE_INVENTORY_FILE': self.provisioner.inventory_file,
'MOLECULE_EPHEMERAL_DIRECTORY': self.scenario.ephemeral_directory,
'MOLECULE_SCENARIO_DIRECTORY': self.scenario.directory,
'MOLECULE_INSTANCE_CONFIG': self.driver.instance_config,
'MOLECULE_DEPENDENCY_NAME': self.dependency.name,
'MOLECULE_DRIVER_NAME': self.driver.name,
'MOLECULE_LINT_NAME': self.lint.name,
'MOLECULE_PROVISIONER_NAME': self.provisioner.name,
'MOLECULE_SCENARIO_NAME': self.scenario.name,
'MOLECULE_VERIFIER_NAME': self.verifier.name,
}
@property
def lint(self):
lint_name = self.config['lint']['name']
if lint_name == 'yamllint':
return yamllint.Yamllint(self)
else:
util.exit_with_invalid_section('lint', lint_name)
@property
def platforms(self):
return platforms.Platforms(self)
@property
def provisioner(self):
provisioner_name = self.config['provisioner']['name']
if provisioner_name == 'ansible':
return ansible.Ansible(self)
else:
util.exit_with_invalid_section('provisioner', provisioner_name)
@property
def scenario(self):
return scenario.Scenario(self)
@property
def state(self):
return state.State(self)
@property
def verifier(self):
verifier_name = self.config['verifier']['name']
if verifier_name == 'testinfra':
return testinfra.Testinfra(self)
elif verifier_name == 'goss':
return goss.Goss(self)
else:
util.exit_with_invalid_section('verifier', verifier_name)
@property
def verifiers(self):
return molecule_verifiers()
def merge_dicts(self, a, b):
return merge_dicts(a, b)
def _get_driver_name(self):
driver_from_state_file = self.state.driver
driver_from_cli = self.command_args.get('driver_name')
if driver_from_state_file:
driver_name = driver_from_state_file
elif driver_from_cli:
driver_name = driver_from_cli
else:
driver_name = self.config['driver']['name']
if driver_from_cli and (driver_from_cli != driver_name):
msg = ("Instance(s) were created with the '{}' driver, but the "
"subcommand is using '{}' driver.").format(
driver_name, driver_from_cli)
util.sysexit_with_message(msg)
return driver_name
def _combine(self):
"""
Perform a prioritized recursive merge of the `molecule_file` with
defaults, interpolate the result with environment variables, and
returns a new dict.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
base = self._get_defaults()
with util.open_file(self.molecule_file) as stream:
interpolated_config = i.interpolate(stream.read())
base = self.merge_dicts(base, util.safe_load(interpolated_config))
schema.validate(base)
return base
def _get_defaults(self):
return {
'dependency': {
'name': 'galaxy',
'enabled': True,
'options': {},
'env': {},
},
'driver': {
'name': 'docker',
'provider': {
'name': None
},
'options': {
'managed': True,
},
'ssh_connection_options': [],
'safe_files': [],
},
'lint': {
'name': 'yamllint',
'enabled': True,
'options': {},
'env': {},
},
'platforms': [],
'provisioner': {
'name': 'ansible',
'config_options': {},
'connection_options': {},
'options': {},
'env': {},
'inventory': {
'host_vars': {},
'group_vars': {},
'links': {},
},
'children': {},
'playbooks': {
'create': 'create.yml',
'converge': 'playbook.yml',
'destroy': 'destroy.yml',
'side_effect': None,
},
'lint': {
'name': 'ansible-lint',
'enabled': True,
'options': {},
'env': {},
},
},
'scenario': {
'name':
'default',
'check_sequence': [
'destroy',
'create',
'converge',
'check',
'destroy',
],
'converge_sequence': [
'create',
'converge',
],
'destroy_sequence': [
'destroy',
],
'test_sequence': [
'destroy',
'dependency',
'syntax',
'create',
'converge',
'idempotence',
'lint',
'side_effect',
'verify',
'destroy',
],
},
'verifier': {
'name': 'testinfra',
'enabled': True,
'directory': 'tests',
'options': {},
'env': {},
'additional_files_or_dirs': [],
'lint': {
'name': 'flake8',
'enabled': True,
'options': {},
'env': {},
},
},
}
def merge_dicts(a, b):
"""
Merges the values of b into a (mutating a) and returns the result. Uses the
same merge strategy as ``config._combine``.
::
dict a
b:
- c: 0
- c: 2
d:
e: "aaa"
f: 3
dict b
a: 1
b:
- c: 3
d:
e: "bbb"
Will give an object such as::
{'a': 1, 'b': [{'c': 3}], 'd': {'e': "bbb", 'f': 3}}
:param a: the target dictionary
:param b: the dictionary to import
:return: dict
"""
conf = a
anyconfig.merge(a, b, ac_merge=MERGE_STRATEGY)
return conf
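# A commented sketch of the merge semantics documented above: nested dicts are
# merged recursively (MS_DICTS), lists are replaced wholesale, and ``a`` itself
# is mutated.
#
#     a = {'b': [{'c': 0}, {'c': 2}], 'd': {'e': 'aaa', 'f': 3}}
#     b = {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb'}}
#     merge_dicts(a, b)
#     # -> {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb', 'f': 3}}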
def molecule_directory(path):
return os.path.join(path, MOLECULE_DIRECTORY)
def molecule_file(path):
return os.path.join(path, MOLECULE_FILE)
def molecule_drivers():
return [
delegated.Delegated(None).name,
docker.Docker(None).name,
ec2.Ec2(None).name,
gce.Gce(None).name,
lxc.Lxc(None).name,
kvm.kvm(None).name,
lxd.Lxd(None).name,
openstack.Openstack(None).name,
vagrant.Vagrant(None).name,
]
def molecule_verifiers():
return [goss.Goss(None).name, testinfra.Testinfra(None).name]
|
kireledan/molecule
|
molecule/config.py
|
Python
|
mit
| 12,826
|
[
"Galaxy"
] |
86e4e4b4c7d195dc72917d10dea333d8666d0c418f5fec5eb8d6539f543f5c7a
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
#
# The symmetry detection method implemented here does not strictly follow the
# point group detection flowchart. The detection is based on the degeneracy
# of the cartesian basis of multipole moments, e.g.
# http://symmetry.jacobs-university.de/cgi-bin/group.cgi?group=604&option=4
# see the column of "linear functions, quadratic functions and cubic functions".
#
# Different point groups have different combinations of degeneracies for each
# type of cartesian function. Based on the degeneracy of the cartesian function
# basis, one can quickly filter out a few candidate point groups for the
# given molecule. Regular operations (rotation, mirror etc.) can then be
# applied to identify the symmetry. The current implementation only checks the
# rotation functions, which is roughly enough for D2h and its subgroups.
#
# There are special cases where this detection method may break down, e.g. two
# H8 cube molecules sitting on the same center but with random orientations. The
# system is in C1 while this detection method gives the O group because the
# 3 rotation bases are degenerate. In such cases, the code uses the regular
# method (the point group detection flowchart) to detect the point group.
#
import sys
import re
import numpy
import scipy.linalg
from pyscf.gto import mole
from pyscf.lib import norm
from pyscf.lib import logger
from pyscf.symm.param import OPERATOR_TABLE
from pyscf import __config__
TOLERANCE = getattr(__config__, 'symm_geom_tol', 1e-5)
# For code compatibility in python-2 and python-3
if sys.version_info >= (3,):
unicode = str
def parallel_vectors(v1, v2, tol=TOLERANCE):
if numpy.allclose(v1, 0, atol=tol) or numpy.allclose(v2, 0, atol=tol):
return True
else:
cos = numpy.dot(_normalize(v1), _normalize(v2))
return (abs(cos-1) < TOLERANCE) | (abs(cos+1) < TOLERANCE)
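# Commented sketch: anti-parallel vectors also count as parallel here.
#
#     parallel_vectors(numpy.array([0., 0., 1.]), numpy.array([0., 0., -2.]))
#     # -> True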
def argsort_coords(coords, decimals=None):
if decimals is None:
decimals = int(-numpy.log10(TOLERANCE)) - 1
coords = numpy.around(coords, decimals=decimals)
idx = numpy.lexsort((coords[:,2], coords[:,1], coords[:,0]))
return idx
def sort_coords(coords, decimals=None):
if decimals is None:
decimals = int(-numpy.log10(TOLERANCE)) - 1
coords = numpy.asarray(coords)
idx = argsort_coords(coords, decimals=decimals)
return coords[idx]
# ref. http://en.wikipedia.org/wiki/Rotation_matrix
def rotation_mat(vec, theta):
'''rotate angle theta along vec
new(x,y,z) = R * old(x,y,z)'''
vec = _normalize(vec)
uu = vec.reshape(-1,1) * vec.reshape(1,-1)
ux = numpy.array((
( 0 ,-vec[2], vec[1]),
( vec[2], 0 ,-vec[0]),
(-vec[1], vec[0], 0 )))
c = numpy.cos(theta)
s = numpy.sin(theta)
r = c * numpy.eye(3) + s * ux + (1-c) * uu
return r
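# Commented sanity check of the Rodrigues formula above: rotating the x axis
# by pi/2 about the z axis should give the y axis.
#
#     r = rotation_mat(numpy.array([0., 0., 1.]), numpy.pi / 2)
#     numpy.allclose(numpy.dot(r, [1., 0., 0.]), [0., 1., 0.])  # -> True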
# reflection operation with householder
def householder(vec):
vec = _normalize(vec)
return numpy.eye(3) - vec[:,None]*vec*2
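# Commented sketch: reflecting through the xy plane (normal along z) flips
# only the z component, i.e. householder gives diag(1, 1, -1).
#
#     householder(numpy.array([0., 0., 1.]))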
def closest_axes(axes, ref):
xcomp, ycomp, zcomp = numpy.einsum('ix,jx->ji', axes, ref)
z_id = numpy.argmax(abs(zcomp))
xcomp[z_id] = ycomp[z_id] = 0 # remove z
x_id = numpy.argmax(abs(xcomp))
ycomp[x_id] = 0 # remove x
y_id = numpy.argmax(abs(ycomp))
return x_id, y_id, z_id
def alias_axes(axes, ref):
'''Rename axes, making them as close as possible to the ref axes
'''
x_id, y_id, z_id = closest_axes(axes, ref)
new_axes = axes[[x_id,y_id,z_id]]
if numpy.linalg.det(new_axes) < 0:
new_axes = axes[[y_id,x_id,z_id]]
return new_axes
def detect_symm(atoms, basis=None, verbose=logger.WARN):
'''Detect the point group symmetry for given molecule.
Return group name, charge center, and new axes (three rows for x,y,z)
'''
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(sys.stdout, verbose)
tol = TOLERANCE / numpy.sqrt(1+len(atoms))
decimals = int(-numpy.log10(tol))
log.debug('geometry tol = %g', tol)
rawsys = SymmSys(atoms, basis)
w1, u1 = rawsys.cartesian_tensor(1)
axes = u1.T
log.debug('principal inertia moments %s', w1)
charge_center = rawsys.charge_center
if numpy.allclose(w1, 0, atol=tol):
gpname = 'SO3'
return gpname, charge_center, numpy.eye(3)
elif numpy.allclose(w1[:2], 0, atol=tol): # linear molecule
if rawsys.has_icenter():
gpname = 'Dooh'
else:
gpname = 'Coov'
return gpname, charge_center, axes
else:
w1_degeneracy, w1_degen_values = _degeneracy(w1, decimals)
w2, u2 = rawsys.cartesian_tensor(2)
w2_degeneracy, w2_degen_values = _degeneracy(w2, decimals)
log.debug('2d tensor %s', w2)
n = None
c2x = None
mirrorx = None
if 3 in w1_degeneracy: # T, O, I
# Because rotation vectors Rx Ry Rz are 3-degenerated representation
# See http://www.webqc.org/symmetrypointgroup-td.html
w3, u3 = rawsys.cartesian_tensor(3)
w3_degeneracy, w3_degen_values = _degeneracy(w3, decimals)
log.debug('3d tensor %s', w3)
if (5 in w2_degeneracy and
4 in w3_degeneracy and len(w3_degeneracy) == 3): # I group
gpname, new_axes = _search_i_group(rawsys)
if gpname is not None:
return gpname, charge_center, _refine(new_axes)
elif 3 in w2_degeneracy and len(w2_degeneracy) <= 3: # T/O group
gpname, new_axes = _search_ot_group(rawsys)
if gpname is not None:
return gpname, charge_center, _refine(new_axes)
elif (2 in w1_degeneracy and
numpy.any(w2_degeneracy[w2_degen_values>0] >= 2)):
if numpy.allclose(w1[1], w1[2], atol=tol):
axes = axes[[1,2,0]]
n = rawsys.search_c_highest(axes[2])[1]
if n == 1:
n = None
else:
c2x = rawsys.search_c2x(axes[2], n)
mirrorx = rawsys.search_mirrorx(axes[2], n)
else:
n = -1 # tag as D2h and subgroup
# They must not be I/O/T group, at most one C3 or higher rotation axis
if n is None:
zaxis, n = rawsys.search_c_highest()
if n > 1:
c2x = rawsys.search_c2x(zaxis, n)
mirrorx = rawsys.search_mirrorx(zaxis, n)
if c2x is not None:
axes = _make_axes(zaxis, c2x)
elif mirrorx is not None:
axes = _make_axes(zaxis, mirrorx)
else:
for axis in numpy.eye(3):
if not parallel_vectors(axis, zaxis):
axes = _make_axes(zaxis, axis)
break
else: # Ci or Cs or C1 with degenerated w1
mirror = rawsys.search_mirrorx(None, 1)
if mirror is not None:
xaxis = numpy.array((1.,0.,0.))
axes = _make_axes(mirror, xaxis)
else:
axes = numpy.eye(3)
log.debug('Highest C_n = C%d', n)
if n >= 2:
if c2x is not None:
if rawsys.has_mirror(axes[2]):
gpname = 'D%dh' % n
elif rawsys.has_improper_rotation(axes[2], n):
gpname = 'D%dd' % n
else:
gpname = 'D%d' % n
yaxis = numpy.cross(axes[2], c2x)
axes = _make_axes(axes[2], c2x)
elif mirrorx is not None:
gpname = 'C%dv' % n
axes = _make_axes(axes[2], mirrorx)
elif rawsys.has_mirror(axes[2]):
gpname = 'C%dh' % n
elif rawsys.has_improper_rotation(axes[2], n):
gpname = 'S%d' % (n*2)
else:
gpname = 'C%d' % n
return gpname, charge_center, _refine(axes)
else:
is_c2x = rawsys.has_rotation(axes[0], 2)
is_c2y = rawsys.has_rotation(axes[1], 2)
is_c2z = rawsys.has_rotation(axes[2], 2)
# rotate to old axes, as close as possible?
if is_c2z and is_c2x and is_c2y:
if rawsys.has_icenter():
gpname = 'D2h'
else:
gpname = 'D2'
axes = alias_axes(axes, numpy.eye(3))
elif is_c2z or is_c2x or is_c2y:
if is_c2x:
axes = axes[[1,2,0]]
if is_c2y:
axes = axes[[2,0,1]]
if rawsys.has_mirror(axes[2]):
gpname = 'C2h'
elif rawsys.has_mirror(axes[0]):
gpname = 'C2v'
else:
gpname = 'C2'
else:
if rawsys.has_icenter():
gpname = 'Ci'
elif rawsys.has_mirror(axes[0]):
gpname = 'Cs'
axes = axes[[1,2,0]]
elif rawsys.has_mirror(axes[1]):
gpname = 'Cs'
axes = axes[[2,0,1]]
elif rawsys.has_mirror(axes[2]):
gpname = 'Cs'
else:
gpname = 'C1'
axes = numpy.eye(3)
charge_center = numpy.zeros(3)
return gpname, charge_center, axes
# reduce to D2h and its subgroups
# FIXME, CPL, 209, 506
def get_subgroup(gpname, axes):
if gpname in ('D2h', 'D2' , 'C2h', 'C2v', 'C2' , 'Ci' , 'Cs' , 'C1'):
return gpname, axes
elif gpname in ('SO3',):
#return 'D2h', alias_axes(axes, numpy.eye(3))
return 'Dooh', axes
elif gpname in ('Dooh',):
#return 'D2h', alias_axes(axes, numpy.eye(3))
return 'Dooh', axes
elif gpname in ('Coov',):
#return 'C2v', axes
return 'Coov', axes
elif gpname in ('Oh',):
return 'D2h', alias_axes(axes, numpy.eye(3))
elif gpname in ('O',):
return 'D2', alias_axes(axes, numpy.eye(3))
elif gpname in ('Ih',):
return 'Ci', alias_axes(axes, numpy.eye(3))
elif gpname in ('I',):
return 'C1', axes
elif gpname in ('Td', 'T', 'Th'):
#x,y,z = axes
#x = _normalize(x+y)
#y = numpy.cross(z, x)
#return 'C2v', numpy.array((x,y,z))
return 'D2', alias_axes(axes, numpy.eye(3))
elif re.search(r'S\d+', gpname):
n = int(re.search(r'\d+', gpname).group(0))
if n % 2 == 0:
return 'C%d'%(n//2), axes
else:
return 'Ci', axes
else:
n = int(re.search(r'\d+', gpname).group(0))
if n % 2 == 0:
if re.search(r'D\d+d', gpname):
subname = 'D2'
elif re.search(r'D\d+h', gpname):
subname = 'D2h'
elif re.search(r'D\d+', gpname):
subname = 'D2'
elif re.search(r'C\d+h', gpname):
subname = 'C2h'
elif re.search(r'C\d+v', gpname):
subname = 'C2v'
else:
subname = 'C2'
else:
# rotate axes and
# Dnh -> C2v
# Dn -> C2
# Dnd -> Ci
# Cnh -> Cs
# Cnv -> Cs
if re.search(r'D\d+h', gpname):
subname = 'C2v'
axes = axes[[1,2,0]]
elif re.search(r'D\d+d', gpname):
subname = 'C2h'
axes = axes[[1,2,0]]
elif re.search(r'D\d+', gpname):
subname = 'C2'
axes = axes[[1,2,0]]
elif re.search(r'C\d+h', gpname):
subname = 'Cs'
elif re.search(r'C\d+v', gpname):
subname = 'Cs'
axes = axes[[1,2,0]]
else:
subname = 'C1'
return subname, axes
subgroup = get_subgroup
def as_subgroup(topgroup, axes, subgroup=None):
from pyscf.symm import std_symb
from pyscf.symm.param import SUBGROUP
groupname, axes = get_subgroup(topgroup, axes)
if isinstance(subgroup, (str, unicode)):
subgroup = std_symb(subgroup)
if (groupname == 'D2' and re.search(r'D\d+d', topgroup) and
subgroup in ('C2v', 'Cs')):
# Special treatment for D2d, D4d, .... get_subgroup gives D2 by
# default while C2v is also D2d's subgroup.
groupname = 'C2v'
axes = numpy.einsum('ij,kj->ki', rotation_mat(axes[2], numpy.pi/4), axes)
if subgroup not in SUBGROUP[groupname]:
raise RuntimeError('%s not in Abelian subgroup of %s' %
(subgroup, topgroup))
if subgroup == 'Cs' and groupname == 'C2v':
axes = numpy.einsum('ij,kj->ki', rotation_mat(axes[1], numpy.pi/2), axes)
groupname = subgroup
return groupname, axes
def symm_ops(gpname, axes=None):
if axes is not None:
raise RuntimeError('TODO: non-standard orientation')
op1 = numpy.eye(3)
opi = -1
opc2z = -numpy.eye(3)
opc2z[2,2] = 1
opc2x = -numpy.eye(3)
opc2x[0,0] = 1
opc2y = -numpy.eye(3)
opc2y[1,1] = 1
opcsz = numpy.dot(opc2z, opi)
opcsx = numpy.dot(opc2x, opi)
opcsy = numpy.dot(opc2y, opi)
opdic = {'E' : op1,
'C2z': opc2z,
'C2x': opc2x,
'C2y': opc2y,
'i' : opi,
'sz' : opcsz,
'sx' : opcsx,
'sy' : opcsy,}
return opdic
def symm_identical_atoms(gpname, atoms):
''' Requires '''
from pyscf import gto
# Dooh Coov for linear molecule
if gpname == 'Dooh':
coords = numpy.array([a[1] for a in atoms], dtype=float)
idx0 = argsort_coords(coords)
coords0 = coords[idx0]
opdic = symm_ops(gpname)
newc = numpy.dot(coords, opdic['sz'])
idx1 = argsort_coords(newc)
dup_atom_ids = numpy.sort((idx0,idx1), axis=0).T
uniq_idx = numpy.unique(dup_atom_ids[:,0], return_index=True)[1]
eql_atom_ids = dup_atom_ids[uniq_idx]
eql_atom_ids = [list(sorted(set(i))) for i in eql_atom_ids]
return eql_atom_ids
elif gpname == 'Coov':
eql_atom_ids = [[i] for i,a in enumerate(atoms)]
return eql_atom_ids
charges = numpy.array([gto.charge(a[0]) for a in atoms])
coords = numpy.array([a[1] for a in atoms])
center = numpy.einsum('z,zr->r', charges, coords)/charges.sum()
# if not numpy.allclose(center, 0, atol=TOLERANCE):
# sys.stderr.write('WARN: Molecular charge center %s is not on (0,0,0)\n'
# % center)
opdic = symm_ops(gpname)
ops = [opdic[op] for op in OPERATOR_TABLE[gpname]]
idx = argsort_coords(coords)
coords0 = coords[idx]
dup_atom_ids = []
for op in ops:
newc = numpy.dot(coords, op)
idx = argsort_coords(newc)
if not numpy.allclose(coords0, newc[idx], atol=TOLERANCE):
raise RuntimeError('Symmetry identical atoms not found')
dup_atom_ids.append(idx)
dup_atom_ids = numpy.sort(dup_atom_ids, axis=0).T
uniq_idx = numpy.unique(dup_atom_ids[:,0], return_index=True)[1]
eql_atom_ids = dup_atom_ids[uniq_idx]
eql_atom_ids = [list(sorted(set(i))) for i in eql_atom_ids]
return eql_atom_ids
def check_given_symm(gpname, atoms, basis=None):
# More strict than symm_identical_atoms: we require not only that the
# coordinates match, but also the symbols and basis functions
#FIXME: compare the basis set when basis is given
if gpname == 'Dooh':
coords = numpy.array([a[1] for a in atoms], dtype=float)
if numpy.allclose(coords[:,:2], 0, atol=TOLERANCE):
opdic = symm_ops(gpname)
rawsys = SymmSys(atoms, basis)
return rawsys.has_icenter() and numpy.allclose(rawsys.charge_center, 0)
else:
return False
elif gpname == 'Coov':
coords = numpy.array([a[1] for a in atoms], dtype=float)
return numpy.allclose(coords[:,:2], 0, atol=TOLERANCE)
opdic = symm_ops(gpname)
ops = [opdic[op] for op in OPERATOR_TABLE[gpname]]
rawsys = SymmSys(atoms, basis)
for lst in rawsys.atomtypes.values():
coords = rawsys.atoms[lst,1:]
idx = argsort_coords(coords)
coords0 = coords[idx]
for op in ops:
newc = numpy.dot(coords, op)
idx = argsort_coords(newc)
if not numpy.allclose(coords0, newc[idx], atol=TOLERANCE):
return False
return True
def shift_atom(atoms, orig, axis):
c = numpy.array([a[1] for a in atoms])
c = numpy.dot(c - orig, numpy.array(axis).T)
return [[atoms[i][0], c[i]] for i in range(len(atoms))]
class RotationAxisNotFound(RuntimeError):
pass
class SymmSys(object):
def __init__(self, atoms, basis=None):
self.atomtypes = mole.atom_types(atoms, basis)
# Fake systems, which treat atoms with different bases as different atoms.
# The fake systems do not have the same symmetry as the potential;
# they are only used to determine the main (Z-)axis
chg1 = numpy.pi - 2
coords = []
fake_chgs = []
idx = []
for k, lst in self.atomtypes.items():
idx.append(lst)
coords.append([atoms[i][1] for i in lst])
ksymb = mole._rm_digit(k)
if ksymb != k:
# Put random charges on the decorated atoms
fake_chgs.append([chg1] * len(lst))
chg1 *= numpy.pi-2
elif mole.is_ghost_atom(k):
if ksymb == 'X' or ksymb.upper() == 'GHOST':
fake_chgs.append([.3] * len(lst))
elif ksymb[0] == 'X':
fake_chgs.append([mole.charge(ksymb[1:])+.3] * len(lst))
elif ksymb[:5] == 'GHOST':
fake_chgs.append([mole.charge(ksymb[5:])+.3] * len(lst))
else:
fake_chgs.append([mole.charge(ksymb)] * len(lst))
coords = numpy.array(numpy.vstack(coords), dtype=float)
fake_chgs = numpy.hstack(fake_chgs)
self.charge_center = numpy.einsum('i,ij->j', fake_chgs, coords)/fake_chgs.sum()
coords = coords - self.charge_center
idx = numpy.argsort(numpy.hstack(idx))
self.atoms = numpy.hstack((fake_chgs.reshape(-1,1), coords))[idx]
self.group_atoms_by_distance = []
decimals = int(-numpy.log10(TOLERANCE)) - 1
for index in self.atomtypes.values():
index = numpy.asarray(index)
c = self.atoms[index,1:]
dists = numpy.around(norm(c, axis=1), decimals)
u, idx = numpy.unique(dists, return_inverse=True)
for i, s in enumerate(u):
self.group_atoms_by_distance.append(index[idx == i])
def cartesian_tensor(self, n):
z = self.atoms[:,0]
r = self.atoms[:,1:]
ncart = (n+1)*(n+2)//2
natm = len(z)
tensor = numpy.sqrt(numpy.copy(z).reshape(natm,-1) / z.sum())
for i in range(n):
tensor = numpy.einsum('zi,zj->zij', tensor, r).reshape(natm,-1)
e, c = scipy.linalg.eigh(numpy.dot(tensor.T,tensor))
return e[-ncart:], c[:,-ncart:]
def symmetric_for(self, op):
for lst in self.group_atoms_by_distance:
r0 = self.atoms[lst,1:]
r1 = numpy.dot(r0, op)
# FIXME: compare whether two sets of coordinates are identical
yield all((_vec_in_vecs(x, r0) for x in r1))
def has_icenter(self):
return all(self.symmetric_for(-1))
def has_rotation(self, axis, n):
op = rotation_mat(axis, numpy.pi*2/n).T
return all(self.symmetric_for(op))
def has_mirror(self, perp_vec):
return all(self.symmetric_for(householder(perp_vec).T))
def has_improper_rotation(self, axis, n):
s_op = numpy.dot(householder(axis), rotation_mat(axis, numpy.pi/n)).T
return all(self.symmetric_for(s_op))
def search_possible_rotations(self, zaxis=None):
'''If zaxis is given, the rotation axis is parallel to zaxis'''
maybe_cn = []
for lst in self.group_atoms_by_distance:
natm = len(lst)
if natm > 1:
coords = self.atoms[lst,1:]
# possible C2 axis
for i in range(1, natm):
if abs(coords[0]+coords[i]).sum() > TOLERANCE:
maybe_cn.append((coords[0]+coords[i], 2))
else: # abs(coords[0]-coords[i]).sum() > TOLERANCE:
maybe_cn.append((coords[0]-coords[i], 2))
# atoms of equal distances may be associated with rotation axis > C2.
r0 = coords - coords[0]
distance = norm(r0, axis=1)
eq_distance = abs(distance[:,None] - distance) < TOLERANCE
for i in range(2, natm):
for j in numpy.where(eq_distance[i,:i])[0]:
cos = numpy.dot(r0[i],r0[j]) / (distance[i]*distance[j])
ang = numpy.arccos(cos)
nfrac = numpy.pi*2 / (numpy.pi-ang)
n = int(numpy.around(nfrac))
if abs(nfrac-n) < TOLERANCE:
maybe_cn.append((numpy.cross(r0[i],r0[j]),n))
# remove zero-vectors and duplicated vectors
vecs = numpy.vstack([x[0] for x in maybe_cn])
idx = norm(vecs, axis=1) > TOLERANCE
ns = numpy.hstack([x[1] for x in maybe_cn])
vecs = _normalize(vecs[idx])
ns = ns[idx]
if zaxis is not None: # Keep parallel rotation axes
cos = numpy.dot(vecs, _normalize(zaxis))
vecs = vecs[(abs(cos-1) < TOLERANCE) | (abs(cos+1) < TOLERANCE)]
ns = ns[(abs(cos-1) < TOLERANCE) | (abs(cos+1) < TOLERANCE)]
possible_cn = []
seen = numpy.zeros(len(vecs), dtype=bool)
for k, v in enumerate(vecs):
if not seen[k]:
where1 = numpy.einsum('ix->i', abs(vecs[k:] - v)) < TOLERANCE
where1 = numpy.where(where1)[0] + k
where2 = numpy.einsum('ix->i', abs(vecs[k:] + v)) < TOLERANCE
where2 = numpy.where(where2)[0] + k
seen[where1] = True
seen[where2] = True
vk = _normalize((numpy.einsum('ix->x', vecs[where1]) -
numpy.einsum('ix->x', vecs[where2])))
for n in (set(ns[where1]) | set(ns[where2])):
possible_cn.append((vk,n))
return possible_cn
def search_c2x(self, zaxis, n):
'''C2 axis which is perpendicular to z-axis'''
decimals = int(-numpy.log10(TOLERANCE)) - 1
for lst in self.group_atoms_by_distance:
if len(lst) > 1:
r0 = self.atoms[lst,1:]
zcos = numpy.around(numpy.einsum('ij,j->i', r0, zaxis),
decimals=decimals)
uniq_zcos = numpy.unique(zcos)
maybe_c2x = []
for d in uniq_zcos:
if d > TOLERANCE:
mirrord = abs(zcos+d)<TOLERANCE
if mirrord.sum() == (zcos==d).sum():
above = r0[zcos==d]
below = r0[mirrord]
nelem = len(below)
maybe_c2x.extend([above[0] + below[i]
for i in range(nelem)])
elif abs(d) < TOLERANCE: # plane which crosses the orig
r1 = r0[zcos==d][0]
maybe_c2x.append(r1)
r2 = numpy.dot(rotation_mat(zaxis, numpy.pi*2/n), r1)
if abs(r1+r2).sum() > TOLERANCE:
maybe_c2x.append(r1+r2)
else:
maybe_c2x.append(r2-r1)
if len(maybe_c2x) > 0:
idx = norm(maybe_c2x, axis=1) > TOLERANCE
maybe_c2x = _normalize(maybe_c2x)[idx]
maybe_c2x = _remove_dupvec(maybe_c2x)
for c2x in maybe_c2x:
if (not parallel_vectors(c2x, zaxis) and
self.has_rotation(c2x, 2)):
return c2x
def search_mirrorx(self, zaxis, n):
'''mirror which is parallel to z-axis'''
if n > 1:
for lst in self.group_atoms_by_distance:
natm = len(lst)
r0 = self.atoms[lst[0],1:]
if natm > 1 and not parallel_vectors(r0, zaxis):
r1 = numpy.dot(rotation_mat(zaxis, numpy.pi*2/n), r0)
mirrorx = _normalize(r1-r0)
if self.has_mirror(mirrorx):
return mirrorx
else:
for lst in self.group_atoms_by_distance:
natm = len(lst)
r0 = self.atoms[lst,1:]
if natm > 1:
maybe_mirror = [r0[i]-r0[0] for i in range(1, natm)]
for mirror in _normalize(maybe_mirror):
if self.has_mirror(mirror):
return mirror
def search_c_highest(self, zaxis=None):
possible_cn = self.search_possible_rotations(zaxis)
nmax = 1
cmax = numpy.array([0.,0.,1.])
for cn, n in possible_cn:
if n > nmax and self.has_rotation(cn, n):
nmax = n
cmax = cn
return cmax, nmax
def _normalize(vecs):
vecs = numpy.asarray(vecs)
if vecs.ndim == 1:
return vecs / (numpy.linalg.norm(vecs) + 1e-200)
else:
return vecs / (norm(vecs, axis=1).reshape(-1,1) + 1e-200)
def _vec_in_vecs(vec, vecs):
norm = numpy.sqrt(len(vecs))
return min(numpy.einsum('ix->i', abs(vecs-vec))/norm) < TOLERANCE
def _search_i_group(rawsys):
possible_cn = rawsys.search_possible_rotations()
c5_axes = [c5 for c5, n in possible_cn
if n == 5 and rawsys.has_rotation(c5, 5)]
if len(c5_axes) <= 1:
return None,None
zaxis = c5_axes[0]
cos = numpy.dot(c5_axes, zaxis)
assert(numpy.all((abs(cos[1:]+1/numpy.sqrt(5)) < TOLERANCE) |
(abs(cos[1:]-1/numpy.sqrt(5)) < TOLERANCE)))
if rawsys.has_icenter():
gpname = 'Ih'
else:
gpname = 'I'
c5 = c5_axes[1]
if numpy.dot(c5, zaxis) < 0:
c5 = -c5
c5a = numpy.dot(rotation_mat(zaxis, numpy.pi*6/5), c5)
xaxis = c5a + c5
return gpname, _make_axes(zaxis, xaxis)
def _search_ot_group(rawsys):
possible_cn = rawsys.search_possible_rotations()
c4_axes = [c4 for c4, n in possible_cn
if n == 4 and rawsys.has_rotation(c4, 4)]
if len(c4_axes) > 0: # O group
assert(len(c4_axes) > 1)
if rawsys.has_icenter():
gpname = 'Oh'
else:
gpname = 'O'
return gpname, _make_axes(c4_axes[0], c4_axes[1])
else: # T group
c3_axes = [c3 for c3, n in possible_cn
if n == 3 and rawsys.has_rotation(c3, 3)]
if len(c3_axes) <= 1:
return None, None
cos = numpy.dot(c3_axes, c3_axes[0])
assert(numpy.all((abs(cos[1:]+1./3) < TOLERANCE) |
(abs(cos[1:]-1./3) < TOLERANCE)))
if rawsys.has_icenter():
gpname = 'Th'
# Because C3 axes are on the mirror of Td, two C3 can determine a mirror.
elif rawsys.has_mirror(numpy.cross(c3_axes[0], c3_axes[1])):
gpname = 'Td'
else:
gpname = 'T'
c3a = c3_axes[0]
if numpy.dot(c3a, c3_axes[1]) > 0:
c3a = -c3a
c3b = numpy.dot(rotation_mat(c3a,-numpy.pi*2/3), c3_axes[1])
c3c = numpy.dot(rotation_mat(c3a, numpy.pi*2/3), c3_axes[1])
zaxis, xaxis = c3a+c3b, c3a+c3c
return gpname, _make_axes(zaxis, xaxis)
def _degeneracy(e, decimals):
e = numpy.around(e, decimals)
u, idx = numpy.unique(e, return_inverse=True)
degen = numpy.array([numpy.count_nonzero(idx==i) for i in range(len(u))])
return degen, u
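# Commented sketch: for eigenvalues [1.0, 1.0, 2.0] rounded to 5 decimals,
# the unique values are [1., 2.] with degeneracies [2, 1].
#
#     _degeneracy(numpy.array([1.0, 1.0, 2.0]), 5)
#     # -> (array([2, 1]), array([1., 2.]))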
def _pseudo_vectors(vs):
idy0 = abs(vs[:,1])<TOLERANCE
idz0 = abs(vs[:,2])<TOLERANCE
vs = vs.copy()
# ensure z component > 0
vs[vs[:,2]<0] *= -1
# if z component == 0, ensure y component > 0
vs[(vs[:,1]<0) & idz0] *= -1
# if y and z component == 0, ensure x component > 0
vs[(vs[:,0]<0) & idy0 & idz0] *= -1
return vs
def _remove_dupvec(vs):
def rm_iter(vs):
if len(vs) <= 1:
return vs
else:
x = numpy.sum(abs(vs[1:]-vs[0]), axis=1)
rest = rm_iter(vs[1:][x>TOLERANCE])
return numpy.vstack((vs[0], rest))
return rm_iter(_pseudo_vectors(vs))
def _make_axes(z, x):
y = numpy.cross(z, x)
x = numpy.cross(y, z) # because x might not perp to z
return _normalize(numpy.array((x,y,z)))
def _refine(axes):
# Make sure the axes can be reached by a continuous unitary transformation
if axes[2,2] < 0:
axes[2] *= -1
if abs(axes[0,0]) > abs(axes[1,0]):
x_id, y_id = 0, 1
else:
x_id, y_id = 1, 0
if axes[x_id,0] < 0:
axes[x_id] *= -1
if numpy.linalg.det(axes) < 0:
axes[y_id] *= -1
return axes
if __name__ == "__main__":
atom = [["O" , (1. , 0. , 0. ,)],
['H' , (0. , -.757 , 0.587,)],
['H' , (0. , 0.757 , 0.587,)] ]
gpname, orig, axes = detect_symm(atom)
atom = shift_atom(atom, orig, axes)
print(gpname, symm_identical_atoms(gpname, atom))
atom = [['H', (0,0,0)], ['H', (0,0,-1)], ['H', (0,0,1)]]
gpname, orig, axes = detect_symm(atom)
print(gpname, orig, axes)
atom = shift_atom(atom, orig, axes)
print(gpname, symm_identical_atoms(gpname, atom))
atom = [['H', (0., 0., 0.)],
['H', (0., 0., 1.)],
['H', (0., 1., 0.)],
['H', (1., 0., 0.)],
['H', (-1, 0., 0.)],
['H', (0.,-1., 0.)],
['H', (0., 0.,-1.)]]
gpname, orig, axes = detect_symm(atom)
print(gpname, orig, axes)
atom = shift_atom(atom, orig, axes)
print(gpname, symm_identical_atoms(subgroup(gpname, axes)[0], atom))
|
gkc1000/pyscf
|
pyscf/symm/geom.py
|
Python
|
apache-2.0
| 31,112
|
[
"PySCF"
] |
bc1ec687c169bbbd3593f5bd00f479b7dc0d0c8e69103766633cbbdb6fd9782b
|
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
from functools import reduce
from docker.errors import APIError
from .config import get_service_name_from_net, ConfigurationError
from .const import LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF, DEFAULT_TIMEOUT
from .service import Service
from .container import Container
from .legacy import check_for_legacy_containers
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in service.get('volumes_from', []) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
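# Illustrative sketch with two hypothetical service dicts: 'web' links to
# 'db', so 'db' must appear before 'web' in the sorted output.
#
#     services = [{'name': 'web', 'links': ['db']}, {'name': 'db'}]
#     [s['name'] for s in sort_service_dicts(services)]
#     # -> ['db', 'web']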
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
Construct a Project from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(Service(client=client, project=name, links=links, net=net,
volumes_from=volumes_from, **service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_name in service_dict.get('volumes_from', []):
try:
service = self.get_service(volume_name)
volumes_from.append(service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_name)
volumes_from.append(container)
except APIError:
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
if 'net' in service_dict:
net_name = get_service_name_from_net(service_dict.get('net'))
if net_name:
try:
net = self.get_service(net_name)
except NoSuchService:
try:
net = Container.from_id(self.client, net_name)
except APIError:
raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
else:
net = service_dict['net']
del service_dict['net']
else:
net = None
return net
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
for service in reversed(self.get_services(service_names)):
service.stop(**options)
def kill(self, service_names=None, **options):
for service in reversed(self.get_services(service_names)):
service.kill(**options)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
allow_recreate=True,
smart_recreate=False,
insecure_registry=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
services = self.get_services(service_names, include_deps=start_deps)
plans = self._get_convergence_plans(
services,
allow_recreate=allow_recreate,
smart_recreate=smart_recreate,
)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
insecure_registry=insecure_registry,
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self,
services,
allow_recreate=True,
smart_recreate=False):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies:
log.debug(
'%s has upstream changes (%s)',
service.name, ", ".join(updated_dependencies),
)
plan = service.convergence_plan(
allow_recreate=allow_recreate,
smart_recreate=False,
)
else:
plan = service.convergence_plan(
allow_recreate=allow_recreate,
smart_recreate=smart_recreate,
)
plans[service.name] = plan
return plans
def pull(self, service_names=None, insecure_registry=False):
for service in self.get_services(service_names, include_deps=True):
service.pull(insecure_registry=insecure_registry)
def remove_stopped(self, service_names=None, **options):
for service in self.get_services(service_names):
service.remove_stopped(**options)
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
def matches_service_names(container):
if not service_names:
return True
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
stopped=stopped,
one_off=one_off)
return filter(matches_service_names, containers)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
|
rstacruz/compose
|
compose/project.py
|
Python
|
apache-2.0
| 11,752
|
[
"VisIt"
] |
fc2f551b5c2c054d94ebbb6106da837957bbc515d897ea9de72747fd41b8f9c1
|
"""
Acceptance tests for the teams feature.
"""
from __future__ import absolute_import
import json
import random
import time
from uuid import uuid4
import ddt
from dateutil.parser import parse
from selenium.common.exceptions import TimeoutException
from six.moves import map, range
from common.test.acceptance.fixtures import LMS_BASE_URL
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.fixtures.discussion import ForumsConfigMixin, MultipleThreadFixture, Thread
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.utils import confirm_prompt
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.teams import (
BrowseTeamsPage,
BrowseTopicsPage,
EditMembershipPage,
MyTeamsPage,
TeamManagementPage,
TeamPage,
TeamsPage
)
from common.test.acceptance.tests.helpers import EventsTestMixin, UniqueCourseTest, get_modal_alert
from openedx.core.lib.tests import attr
TOPICS_PER_PAGE = 12
class TeamsTabBase(EventsTestMixin, ForumsConfigMixin, UniqueCourseTest):
"""Base class for Teams Tab tests"""
def setUp(self):
super(TeamsTabBase, self).setUp()
self.tab_nav = TabNavPage(self.browser)
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.teams_page = TeamsPage(self.browser, self.course_id)
# TODO: Refactor so resetting events database is not necessary
self.reset_event_tracking()
self.enable_forums()
def create_topics(self, num_topics):
"""Create `num_topics` test topics."""
return [{u"description": i, u"name": i, u"id": i} for i in map(str, range(num_topics))]
def create_teams(self, topic, num_teams, time_between_creation=0):
"""Create `num_teams` teams belonging to `topic`."""
teams = []
for i in range(num_teams):
team = {
'course_id': self.course_id,
'topic_id': topic['id'],
'name': u'Team {}'.format(i),
'description': u'Description {}'.format(i),
'language': 'aa',
'country': 'AF'
}
teams.append(self.post_team_data(team))
# Sadly, this sleep is necessary in order to ensure that
# sorting by last_activity_at works correctly when running
# in Jenkins.
# THIS IS AN ANTI-PATTERN - DO NOT COPY.
time.sleep(time_between_creation)
return teams
def post_team_data(self, team_data):
"""Given a JSON representation of a team, post it to the server."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/teams/',
data=json.dumps(team_data),
headers=self.course_fixture.headers
)
self.assertEqual(response.status_code, 200)
return json.loads(response.text)
def create_memberships(self, num_memberships, team_id):
"""Create `num_memberships` users and assign them to `team_id`. The
last user created becomes the current user."""
memberships = []
for __ in range(num_memberships):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
memberships.append(user_info)
self.create_membership(user_info['username'], team_id)
#pylint: disable=attribute-defined-outside-init
self.user_info = memberships[-1]
return memberships
def create_membership(self, username, team_id):
"""Assign `username` to `team_id`."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/team_membership/',
data=json.dumps({'username': username, 'team_id': team_id}),
headers=self.course_fixture.headers
)
return json.loads(response.text)
def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
"""
Sets team configuration on the course and calls auto-auth on the user.
"""
#pylint: disable=attribute-defined-outside-init
self.course_fixture = CourseFixture(**self.course_info)
if configuration:
self.course_fixture.add_advanced_settings(
{u"teams_configuration": {u"value": configuration}}
)
self.course_fixture.install()
enroll_course_id = self.course_id if enroll_in_course else None
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit().user_info
self.course_home_page.visit()
def verify_teams_present(self, present):
"""
Verifies whether or not the teams tab is present. If it should be present, also
checks the text on the page (to ensure view is working).
"""
if present:
self.assertIn("Teams", self.tab_nav.tab_names)
self.teams_page.visit()
self.assertEqual(self.teams_page.active_tab(), 'browse')
else:
self.assertNotIn("Teams", self.tab_nav.tab_names)
def verify_teams(self, page, expected_teams):
"""Verify that the list of team cards on the current page match the expected teams in order."""
def assert_team_equal(expected_team, team_card_name, team_card_description):
"""
Helper to assert that a single team card has the expected name and
description.
"""
self.assertEqual(expected_team['name'], team_card_name)
self.assertEqual(expected_team['description'], team_card_description)
team_card_names = page.team_names
team_card_descriptions = page.team_descriptions
list(map(assert_team_equal, expected_teams, team_card_names, team_card_descriptions))
def verify_my_team_count(self, expected_number_of_teams):
""" Verify the number of teams shown on "My Team". """
# We are doing these operations on this top-level page object to avoid reloading the page.
self.teams_page.verify_my_team_count(expected_number_of_teams)
def only_team_events(self, event):
"""Filter out all non-team events."""
return event['event_type'].startswith('edx.team.')
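# --- Illustrative sketch (not part of the original file) --------------------
# only_team_events above keeps only events in the edx.team.* namespace. The
# same predicate, applied standalone to hypothetical tracking events:
_sample_events = [
    {'event_type': 'edx.team.created'},
    {'event_type': 'edx.forum.thread.created'},
    {'event_type': 'edx.team.page_viewed'},
]
assert [e['event_type'] for e in _sample_events
        if e['event_type'].startswith('edx.team.')] == [
    'edx.team.created', 'edx.team.page_viewed']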
@ddt.ddt
@attr(shard=5)
class TeamsTabTest(TeamsTabBase):
"""
Tests verifying when the Teams tab is present.
"""
def test_teams_not_enabled(self):
"""
Scenario: teams tab should not be present if no team configuration is set
Given I am enrolled in a course without team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(None)
self.verify_teams_present(False)
def test_teams_not_enabled_no_topics(self):
"""
Scenario: teams tab should not be present if team configuration does not specify topics
Given I am enrolled in a course with no topics in the team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": []})
self.verify_teams_present(False)
def test_teams_enabled(self):
"""
Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
Given I am enrolled in a course with team configuration and topics
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(1)})
self.verify_teams_present(True)
def test_teams_enabled_global_staff(self):
"""
Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
Given there is a course with team configuration
And I am not enrolled in that course, but am global staff
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False,
global_staff=True
)
self.verify_teams_present(True)
@ddt.data(
'topics/{topic_id}',
'topics/{topic_id}/search',
'teams/{topic_id}/{team_id}/edit-team',
'teams/{topic_id}/{team_id}'
)
def test_unauthorized_error_message(self, route):
"""Ensure that an error message is shown to the user if they attempt
to take an action which makes an AJAX request while not signed
in.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration(
{u'max_team_size': 10, u'topics': topics},
global_staff=True
)
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
self.browser.delete_cookie('sessionid')
url = self.browser.current_url.split('#')[0]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
)
)
)
self.teams_page.wait_for_ajax()
self.assertEqual(
self.teams_page.warning_message,
u"Your request could not be completed. Reload the page and try again."
)
@ddt.data(
('browse', '.topics-list'),
# TODO: find a reliable way to match the "My Teams" tab
# ('my-teams', 'div.teams-list'),
('teams/{topic_id}/{team_id}', 'div.discussion-module'),
('topics/{topic_id}/create-team', 'div.create-team-instructions'),
('topics/{topic_id}', '.teams-list'),
('not-a-real-route', 'div.warning')
)
@ddt.unpack
def test_url_routing(self, route, selector):
"""Ensure that navigating to a URL route correctly updates the page
content.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration({
u'max_team_size': 10,
u'topics': topics
})
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
# Get the base URL (the URL without any trailing fragment)
url = self.browser.current_url
fragment_index = url.find('#')
if fragment_index >= 0:
url = url[0:fragment_index]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
))
)
self.teams_page.wait_for_page()
self.teams_page.wait_for_ajax()
self.assertTrue(self.teams_page.q(css=selector).present)
self.assertTrue(self.teams_page.q(css=selector).visible)
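# --- Illustrative sketch (not part of the original file) --------------------
# test_url_routing above strips any existing '#fragment' before appending a
# new route. The same base-URL computation, standalone (URLs are hypothetical):
def _base_url(url):
    index = url.find('#')
    return url if index < 0 else url[:index]

assert _base_url('http://lms.example.com/teams#browse') == 'http://lms.example.com/teams'
assert _base_url('http://lms.example.com/teams') == 'http://lms.example.com/teams'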
@attr(shard=5)
class MyTeamsTest(TeamsTabBase):
"""
Tests for the "My Teams" tab of the Teams page.
"""
def setUp(self):
super(MyTeamsTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.my_teams_page = MyTeamsPage(self.browser, self.course_id)
self.page_viewed_event = {
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'my-teams',
'topic_id': None,
'team_id': None
}
}
def test_not_member_of_any_teams(self):
"""
Scenario: Visiting the My Teams page when user is not a member of any team should not display any teams.
Given I am enrolled in a course with a team configuration and a topic but am not a member of a team
When I visit the My Teams page
        Then I should see no teams
And I should see a message that I belong to no teams.
"""
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.assertEqual(len(self.my_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertEqual(
self.my_teams_page.q(css='.page-content-main').text,
[u'You are not currently a member of any team.']
)
def test_member_of_a_team(self):
"""
Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
Given I am enrolled in a course with a team configuration and a topic and am a member of a team
When I visit the My Teams page
Then I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should not see a pagination footer
"""
teams = self.create_teams(self.topic, 1)
self.create_membership(self.user_info['username'], teams[0]['id'])
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.verify_teams(self.my_teams_page, teams)
def test_multiple_team_members(self):
"""
Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
Given I am a member of a team with multiple members
When I visit the My Teams page
Then I should see the correct number of team members on my membership
"""
teams = self.create_teams(self.topic, 1)
self.create_memberships(4, teams[0]['id'])
self.my_teams_page.visit()
self.assertEqual(self.my_teams_page.team_memberships[0], '4 / 10 Members')
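# --- Illustrative sketch (not part of the original file) --------------------
# The membership count asserted above ('4 / 10 Members') follows a simple
# "<members> / <capacity> Members" pattern; a standalone model of that string:
def _capacity_text(num_members, max_size):
    return '{} / {} Members'.format(num_members, max_size)

assert _capacity_text(4, 10) == '4 / 10 Members'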
@attr(shard=5)
@ddt.ddt
class BrowseTopicsTest(TeamsTabBase):
"""
Tests for the Browse tab of the Teams page.
"""
def setUp(self):
super(BrowseTopicsTest, self).setUp()
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
@ddt.data(('name', False), ('team_count', True))
@ddt.unpack
def test_sort_topics(self, sort_order, reverse):
"""
Scenario: the user should be able to sort the list of topics by name or team count
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
When I choose a sort order
Then I should see the paginated list of topics in that order
"""
topics = self.create_topics(TOPICS_PER_PAGE + 1)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
for i, topic in enumerate(random.sample(topics, len(topics))):
self.create_teams(topic, i)
topic['team_count'] = i
self.topics_page.visit()
self.topics_page.sort_topics_by(sort_order)
topic_names = self.topics_page.topic_names
self.assertEqual(len(topic_names), TOPICS_PER_PAGE)
self.assertEqual(
topic_names,
[t['name'] for t in sorted(topics, key=lambda t: t[sort_order], reverse=reverse)][:TOPICS_PER_PAGE]
)
def test_sort_topics_update(self):
"""
Scenario: the list of topics should remain sorted after updates
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics and choose a sort order
Then I should see the paginated list of topics in that order
When I create a team in one of those topics
And I return to the topics list
Then I should see the topics in the correct sorted order
"""
topics = self.create_topics(3)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
self.topics_page.visit()
self.topics_page.sort_topics_by('team_count')
topic_name = self.topics_page.topic_names[-1]
topic = [t for t in topics if t['name'] == topic_name][0]
self.topics_page.browse_teams_for_topic(topic_name)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
browse_teams_page.wait_for_page()
browse_teams_page.click_create_team_link()
create_team_page = TeamManagementPage(self.browser, self.course_id, topic)
create_team_page.create_team()
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
team_page.click_all_topics()
self.topics_page.wait_for_page()
self.topics_page.wait_for_ajax()
self.assertEqual(topic_name, self.topics_page.topic_names[0])
def test_list_topics(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(2)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 2)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-2 out of 2 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_topic_pagination(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab, paginated 12 per page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see only the first 12 topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(20)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 20 total'))
self.assertTrue(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertTrue(self.topics_page.is_next_page_button_enabled())
def test_go_to_numbered_page(self):
"""
Scenario: topics should be able to be navigated by page number
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter a valid page number in the page number input
Then I should see that page of topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(25)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_go_to_invalid_page(self):
"""
Scenario: browsing topics should not respond to invalid page numbers
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter an invalid page number in the page number input
Then I should stay on the current page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(self.topics_page.get_current_page_number(), 1)
def test_page_navigation_buttons(self):
"""
        Scenario: topics should be navigable using the next and previous page buttons
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
When I press the next page button
Then I should move to the next page
When I press the previous page button
Then I should move to the previous page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.press_next_page_button()
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 13-13 out of 13 total'))
self.topics_page.press_previous_page_button()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 13 total'))
def test_topic_pagination_one_page(self):
"""
        Scenario: when there are fewer topics than the page size (12),
        all topics should show on one page
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics
        Then I should see the correct number of topic cards
And I should see the correct page header
And I should not see a pagination footer
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(10)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 10)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
def test_topic_description_truncation(self):
"""
Scenario: excessively long topic descriptions should be truncated so
as to fit within a topic card.
Given I am enrolled in a course with a team configuration and a topic
with a long description
When I visit the Teams page
And I browse topics
Then I should see a truncated topic description
"""
initial_description = "A" + " really" * 50 + " long description"
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [{"name": "", "id": "", "description": initial_description}]}
)
self.topics_page.visit()
truncated_description = self.topics_page.topic_descriptions[0]
self.assertLess(len(truncated_description), len(initial_description))
self.assertTrue(truncated_description.endswith('...'))
self.assertIn(truncated_description.split('...')[0], initial_description)
def test_go_to_teams_list(self):
"""
Scenario: Clicking on a Topic Card should take you to the
teams list for that Topic.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
And I browse topics
And I click on the arrow link to view teams for the first topic
Then I should be on the browse teams page
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
self.topics_page.visit()
self.topics_page.browse_teams_for_topic('Example Topic')
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
browse_teams_page.wait_for_page()
self.assertEqual(browse_teams_page.header_name, 'Example Topic')
self.assertEqual(browse_teams_page.header_description, 'Description')
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse topics page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the browse topics page
Then my browser should post a page viewed event
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'browse',
'topic_id': None,
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.topics_page.visit()
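# --- Illustrative sketch (not part of the original file) --------------------
# The pagination headers asserted above follow one pattern; a standalone model
# of that header for a page of topics, assuming 12 topics per page:
def _pagination_header(page, total, per_page=TOPICS_PER_PAGE):
    if total == 0:
        return 'Showing 0 out of 0 total'
    start = (page - 1) * per_page + 1
    end = min(page * per_page, total)
    return 'Showing {}-{} out of {} total'.format(start, end, total)

assert _pagination_header(1, 20) == 'Showing 1-12 out of 20 total'
assert _pagination_header(2, 13) == 'Showing 13-13 out of 13 total'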
@attr(shard=5)
@ddt.ddt
class BrowseTeamsWithinTopicTest(TeamsTabBase):
"""
Tests for browsing Teams within a Topic on the Teams page.
"""
TEAMS_PAGE_SIZE = 10
def setUp(self):
super(BrowseTeamsWithinTopicTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.max_team_size = 10
self.set_team_configuration({
'course_id': self.course_id,
'max_team_size': self.max_team_size,
'topics': [self.topic]
})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
def teams_with_default_sort_order(self, teams):
"""Return a list of teams sorted according to the default ordering
(last_activity_at, with a secondary sort by open slots).
"""
return sorted(
sorted(teams, key=lambda t: len(t['membership']), reverse=True),
key=lambda t: parse(t['last_activity_at']).replace(microsecond=0),
reverse=True
)
def verify_page_header(self):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(self.browse_teams_page.header_name, self.topic['name'])
self.assertEqual(self.browse_teams_page.header_description, self.topic['description'])
def verify_search_header(self, search_results_page, search_query):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(search_results_page.header_name, 'Team Search')
self.assertEqual(
search_results_page.header_description,
u'Showing results for "{search_query}"'.format(search_query=search_query)
)
def verify_on_page(self, teams_page, page_num, total_teams, pagination_header_text, footer_visible):
"""
Verify that we are on the correct team list page.
Arguments:
teams_page (BaseTeamsPage): The teams page object that should be the current page.
page_num (int): The one-indexed page number that we expect to be on
total_teams (list): An unsorted list of all the teams for the
current topic
pagination_header_text (str): Text we expect to see in the
pagination header.
footer_visible (bool): Whether we expect to see the pagination
footer controls.
"""
sorted_teams = self.teams_with_default_sort_order(total_teams)
self.assertTrue(teams_page.get_pagination_header_text().startswith(pagination_header_text))
self.verify_teams(
teams_page,
sorted_teams[(page_num - 1) * self.TEAMS_PAGE_SIZE:page_num * self.TEAMS_PAGE_SIZE]
)
self.assertEqual(
teams_page.pagination_controls_visible(),
footer_visible,
            msg='Expected paging footer to be ' + ('visible' if footer_visible else 'invisible')
)
@ddt.data(
('open_slots', 'last_activity_at', True),
('last_activity_at', 'open_slots', True)
)
@ddt.unpack
def test_sort_teams(self, sort_order, secondary_sort_order, reverse):
"""
Scenario: the user should be able to sort the list of teams by open slots or last activity
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic
When I choose a sort order
Then I should see the paginated list of teams in that order
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
for i, team in enumerate(random.sample(teams, len(teams))):
for _ in range(i):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
self.create_membership(user_info['username'], team['id'])
team['open_slots'] = self.max_team_size - i
# Re-authenticate as staff after creating users
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=True
).visit()
self.browse_teams_page.visit()
self.browse_teams_page.sort_teams_by(sort_order)
team_names = self.browse_teams_page.team_names
self.assertEqual(len(team_names), self.TEAMS_PAGE_SIZE)
sorted_teams = [
team['name']
for team in sorted(
sorted(teams, key=lambda t: t[secondary_sort_order], reverse=reverse),
key=lambda t: t[sort_order],
reverse=reverse
)
][:self.TEAMS_PAGE_SIZE]
self.assertEqual(team_names, sorted_teams)
def test_default_sort_order(self):
"""
Scenario: the list of teams should be sorted by last activity by default
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic, sorted by last activity
"""
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
self.browse_teams_page.visit()
self.assertEqual(self.browse_teams_page.sort_order, 'last activity')
def test_no_teams(self):
"""
Scenario: Visiting a topic with no teams should not display any teams.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing no teams
And I should see no teams
And I should see a button to add a team
And I should not see a pagination footer
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.assertEqual(len(self.browse_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_one_page(self):
"""
        Scenario: Visiting a topic with fewer teams than the page size should
        show all those teams on one page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should see a button to add a team
And I should not see a pagination footer
"""
teams = self.teams_with_default_sort_order(
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE, time_between_creation=1)
)
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.verify_teams(self.browse_teams_page, teams)
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_navigation_buttons(self):
"""
Scenario: The user should be able to page through a topic's team list
using navigation buttons when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I click on the next page button
Then I should see that I am on the second page of results
And when I click on the previous page button
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
self.browse_teams_page.press_next_page_button()
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-11 out of 11 total', True)
self.browse_teams_page.press_previous_page_button()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
def test_teams_page_input(self):
"""
Scenario: The user should be able to page through a topic's team list
using the page input when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I input the second page
Then I should see that I am on the second page of results
When I input the first page
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 10, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
self.browse_teams_page.go_to_page(2)
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-20 out of 20 total', True)
self.browse_teams_page.go_to_page(1)
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
def test_browse_team_topics(self):
"""
Scenario: User should be able to navigate to "browse all teams" and "search team description" links.
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic
Then I should see the correct page header
And I should see the link to "browse teams in other topics"
        When I navigate to that link
Then I should see the topic browse page
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.browse_teams_page.click_browse_all_teams_link()
self.topics_page.wait_for_page()
def test_search(self):
"""
Scenario: User should be able to search for a team
Given I am enrolled in a course with teams enabled
When I visit the Teams page for that topic
And I search for 'banana'
Then I should see the search result page
And the search header should be shown
And 0 results should be shown
And my browser should fire a page viewed event for the search page
And a searched event should have been fired
"""
# Note: all searches will return 0 results with the mock search server
# used by Bok Choy.
search_text = 'banana'
self.create_teams(self.topic, 5)
self.browse_teams_page.visit()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'search-teams',
'topic_id': self.topic['id'],
'team_id': None
}
}, {
'event_type': 'edx.team.searched',
'event': {
'search_text': search_text,
'topic_id': self.topic['id'],
'number_of_results': 0
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events, in_order=False):
search_results_page = self.browse_teams_page.search(search_text)
self.verify_search_header(search_results_page, search_text)
self.assertTrue(search_results_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
Then my browser should post a page viewed event for the teams page
"""
self.create_teams(self.topic, 5)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-topic',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.browse_teams_page.visit()
def test_team_name_xss(self):
"""
Scenario: Team names should be HTML-escaped on the teams page
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic, with a team name containing JS code
Then I should not see any alerts
"""
self.post_team_data({
'course_id': self.course_id,
'topic_id': self.topic['id'],
'name': '<script>alert("XSS")</script>',
'description': 'Description',
'language': 'aa',
'country': 'AF'
})
with self.assertRaises(TimeoutException):
self.browser.get(self.browse_teams_page.url)
alert = get_modal_alert(self.browser)
alert.accept()
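# --- Illustrative sketch (not part of the original file) --------------------
# teams_with_default_sort_order above relies on Python's stable sort: sorting
# first by membership size (descending) and then by last_activity_at
# (descending) yields last-activity order with membership size as the
# tie-breaker. A standalone model with hypothetical numeric activity stamps:
_teams = [
    {'name': 'A', 'members': 1, 'last_activity': 2},
    {'name': 'B', 'members': 3, 'last_activity': 2},
    {'name': 'C', 'members': 2, 'last_activity': 5},
]
_by_members = sorted(_teams, key=lambda t: t['members'], reverse=True)
_ordered = sorted(_by_members, key=lambda t: t['last_activity'], reverse=True)
assert [t['name'] for t in _ordered] == ['C', 'B', 'A']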
class TeamFormActions(TeamsTabBase):
"""
    Base class for the create, edit, and delete team tests.
"""
TEAM_DESCRIPTION = 'The Avengers are a fictional team of superheroes.'
topic = {'name': 'Example Topic', 'id': 'example_topic', 'description': 'Description'}
TEAMS_NAME = 'Avengers'
def setUp(self):
super(TeamFormActions, self).setUp()
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
def verify_page_header(self, title, description, breadcrumbs):
"""
Verify that the page header correctly reflects the
create team header, description and breadcrumb.
"""
self.assertEqual(self.team_management_page.header_page_name, title)
self.assertEqual(self.team_management_page.header_page_description, description)
self.assertEqual(self.team_management_page.header_page_breadcrumbs, breadcrumbs)
def verify_and_navigate_to_create_team_page(self):
"""Navigates to the create team page and verifies."""
self.browse_teams_page.click_create_team_link()
self.verify_page_header(
title='Create a New Team',
description='Create a new team if you can\'t find an existing team to join, '
'or if you would like to learn with friends you know.',
breadcrumbs=u'All Topics {topic_name}'.format(topic_name=self.topic['name'])
)
def verify_and_navigate_to_edit_team_page(self):
"""Navigates to the edit team page and verifies."""
self.assertEqual(self.team_page.team_name, self.team['name'])
self.assertTrue(self.team_page.edit_team_button_present)
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
# Edit page header.
self.verify_page_header(
title='Edit Team',
description='If you make significant changes, make sure you notify '
'members of the team before making these changes.',
breadcrumbs=u'All Topics {topic_name} {team_name}'.format(
topic_name=self.topic['name'],
team_name=self.team['name']
)
)
def verify_team_info(self, name, description, location, language):
"""Verify the team information on team page."""
self.assertEqual(self.team_page.team_name, name)
self.assertEqual(self.team_page.team_description, description)
self.assertEqual(self.team_page.team_location, location)
self.assertEqual(self.team_page.team_language, language)
def fill_create_or_edit_form(self):
"""Fill the create/edit team form fields with appropriate values."""
self.team_management_page.value_for_text_field(
field_id='name',
value=self.TEAMS_NAME,
press_enter=False
)
self.team_management_page.set_value_for_textarea_field(
field_id='description',
value=self.TEAM_DESCRIPTION
)
self.team_management_page.value_for_dropdown_field(field_id='language', value='English')
self.team_management_page.value_for_dropdown_field(field_id='country', value='Pakistan')
def verify_all_fields_exist(self):
"""
Verify the fields for create/edit page.
"""
self.assertEqual(
self.team_management_page.message_for_field('name'),
'A name that identifies your team (maximum 255 characters).'
)
self.assertEqual(
self.team_management_page.message_for_textarea_field('description'),
'A short description of the team to help other learners understand '
'the goals or direction of the team (maximum 300 characters).'
)
self.assertEqual(
self.team_management_page.message_for_field('country'),
'The country that team members primarily identify with.'
)
self.assertEqual(
self.team_management_page.message_for_field('language'),
'The language that team members primarily use to communicate with each other.'
)
@attr(shard=5)
@ddt.ddt
class CreateTeamTest(TeamFormActions):
"""
Tests for creating a new Team within a Topic on the Teams page.
"""
def setUp(self):
super(CreateTeamTest, self).setUp()
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.browse_teams_page.visit()
def test_user_can_see_create_team_page(self):
"""
Scenario: The user should be able to see the create team page via teams list page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
        Then I should see the Create Team page link at the bottom
And When I click create team link
Then I should see the create team page.
And I should see the create team header
And I should also see the help messages for fields.
"""
self.verify_and_navigate_to_create_team_page()
self.verify_all_fields_exist()
def test_user_can_see_error_message_for_missing_data(self):
"""
        Scenario: The user should see an error message when required fields are missing.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
And When I click create team button without filling required fields
Then I should see the error message and highlighted fields.
"""
self.verify_and_navigate_to_create_team_page()
# `submit_form` clicks on a button, but that button doesn't always
# have the click event handler registered on it in time. That's why
# this test is flaky. Unfortunately, I don't know of a straightforward
# way to write something that waits for that event handler to be bound
# to the button element. So I used time.sleep as well, even though
# the bok choy docs explicitly ask us not to:
# https://bok-choy.readthedocs.io/en/latest/guidelines.html
# Sorry! For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_management_page.submit_form()
self.team_management_page.wait_for(
lambda: self.team_management_page.validation_message_text,
"Validation message text never loaded."
)
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
self.assertTrue(self.team_management_page.error_for_field(field_id='description'))
def test_user_can_see_error_message_for_incorrect_data(self):
"""
        Scenario: The user should see an error message when a field exceeds its maximum length.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
        When I enter more than 255 characters in the name field
And I click Create button
Then I should see the error message for exceeding length.
"""
self.verify_and_navigate_to_create_team_page()
# Fill the name field with >255 characters to see validation message.
self.team_management_page.value_for_text_field(
field_id='name',
value='EdX is a massive open online course (MOOC) provider and online learning platform. '
'It hosts online university-level courses in a wide range of disciplines to a worldwide '
'audience, some at no charge. It also conducts research into learning based on how '
                  'people use its platform. EdX was created for students and institutions that seek to '
                  'transform themselves through cutting-edge technologies, innovative pedagogy, and '
                  'rigorous courses. More than 70 schools, nonprofits, corporations, and international '
                  'organizations offer or plan to offer courses on the edX website. As of 22 October 2014, '
                  'edX has more than 4 million users taking more than 500 courses online.',
press_enter=False
)
self.team_management_page.submit_form()
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
def test_user_can_create_new_team_successfully(self):
"""
Scenario: The user should be able to create new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I fill all the fields present with appropriate data
And I click Create button
Then I expect analytics events to be emitted
And I should see the page for my team
        And I should see the message that says "You are a member of this team"
And the new team should be added to the list of teams within the topic
And the number of teams should be updated on the topic card
And if I switch to "My Team", the newly created team is displayed
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.browse_teams_page.visit()
self.verify_and_navigate_to_create_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.created'
},
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'added_on_create',
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_management_page.submit_form()
# Verify that the page is shown for the new team
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
self.assertEqual(team_page.team_name, self.TEAMS_NAME)
self.assertEqual(team_page.team_description, self.TEAM_DESCRIPTION)
self.assertEqual(team_page.team_user_membership_text, 'You are a member of this team.')
# Verify the new team was added to the topic list
self.teams_page.click_specific_topic("Example Topic")
self.teams_page.verify_topic_team_count(1)
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(1)
# Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
self.verify_my_team_count(1)
def test_user_can_cancel_the_team_creation(self):
"""
Scenario: The user should be able to cancel the creation of new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I click Cancel button
Then I should see teams list page without any new team.
And if I switch to "My Team", it shows no teams
"""
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.verify_and_navigate_to_create_team_page()
# We add a sleep here to allow time for the click event handler to bind
# to the cancel button. Using time.sleep in bok-choy tests is,
# generally, an anti-pattern. So don't copy this :).
# For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_management_page.cancel_team()
self.browse_teams_page.wait_for_page()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(0)
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the create team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the create team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'new-team',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_create_team_page()
@attr(shard=21)
@ddt.ddt
class DeleteTeamTest(TeamFormActions):
"""
Tests for deleting teams.
"""
def setUp(self):
super(DeleteTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
        # Need to have a membership to confirm it gets deleted as well.
self.create_membership(self.user_info['username'], self.team['id'])
self.team_page.visit()
def test_cancel_delete(self):
"""
Scenario: The user should be able to cancel the Delete Team dialog
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I cancel the prompt
And I refresh the page
Then I should still see the team
"""
self.delete_team(cancel=True)
self.team_management_page.wait_for_page()
self.browser.refresh()
self.team_management_page.wait_for_page()
self.assertEqual(
' '.join(('All Topics', self.topic['name'], self.team['name'])),
self.team_management_page.header_page_breadcrumbs
)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_delete_team(self, role):
"""
        Scenario: The user should be able to delete a team.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I confirm the prompt
Then I should see the browse teams page
And the team should not be present
"""
# If role is None, remain logged in as global staff
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.delete_team(require_notification=False)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
browse_teams_page.wait_for_page()
self.assertNotIn(self.team['name'], browse_teams_page.team_names)
def delete_team(self, **kwargs):
"""
Delete a team. Passes `kwargs` to `confirm_prompt`.
        Expects the edx.team.deleted event to be emitted with the correct team_id.
Also expects edx.team.learner_removed event to be emitted for the
membership that is removed as a part of the delete operation.
"""
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.team_management_page.delete_team_button.click()
if 'cancel' in kwargs and kwargs['cancel'] is True:
confirm_prompt(self.team_management_page, **kwargs)
else:
expected_events = [
{
'event_type': 'edx.team.deleted',
'event': {
'team_id': self.team['id']
}
},
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'team_deleted',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
confirm_prompt(self.team_management_page, **kwargs)
def test_delete_team_updates_topics(self):
"""
Scenario: Deleting a team should update the team count on the topics page
Given I am staff user for a course with a team
And I delete a team
When I navigate to the browse topics page
        Then the team count for the deleted team's topic should be updated
"""
self.delete_team(require_notification=False)
BrowseTeamsPage(self.browser, self.course_id, self.topic).click_all_topics()
topics_page = BrowseTopicsPage(self.browser, self.course_id)
topics_page.wait_for_page()
self.teams_page.verify_topic_team_count(0)
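# --- Illustrative sketch (not part of the original file) --------------------
# delete_team above expects an edx.team.deleted event plus one
# edx.team.learner_removed event per membership removed by the delete. A
# standalone model of that expected-event list, with hypothetical user ids:
def _delete_team_events(team_id, member_ids):
    events = [{'event_type': 'edx.team.deleted', 'event': {'team_id': team_id}}]
    for user_id in member_ids:
        events.append({
            'event_type': 'edx.team.learner_removed',
            'event': {'team_id': team_id, 'remove_method': 'team_deleted',
                      'user_id': user_id},
        })
    return events

assert len(_delete_team_events('team-1', [42])) == 2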
@attr(shard=17)
@ddt.ddt
class EditTeamTest(TeamFormActions):
"""
Tests for editing the team.
"""
def setUp(self):
super(EditTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
self.team_page.visit()
def test_staff_can_navigate_to_edit_team_page(self):
"""
Scenario: The user should be able to see and navigate to the edit team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And I should see the edit team header
And I should also see the help messages for fields
"""
self.verify_and_navigate_to_edit_team_page()
self.verify_all_fields_exist()
def test_staff_can_edit_team_successfully(self):
"""
Scenario: The staff should be able to edit team successfully.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And an analytics event should be fired
When I edit all the fields with appropriate data
And I click Update button
Then I should see the page for my team with updated data
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'country',
'old': 'AF',
'new': 'PK',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'name',
'old': self.team['name'],
'new': self.TEAMS_NAME,
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'language',
'old': 'aa',
'new': 'en',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'description',
'old': self.team['description'],
'new': self.TEAM_DESCRIPTION,
'truncated': [],
}
},
]
with self.assert_events_match_during(
event_filter=self.only_team_events,
expected_events=expected_events,
):
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_staff_can_cancel_the_team_edit(self):
"""
Scenario: The user should be able to cancel the editing of team.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
        And I should see the Edit Team header
When I click Cancel button
        Then I should see the team page without changes.
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.cancel_team()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
def test_student_cannot_see_edit_button(self):
"""
Scenario: The student should not see the edit team button.
Given I am student for a course with a team
When I visit the Team profile page
Then I should not see the Edit Team button
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page.visit()
self.assertFalse(self.team_page.edit_team_button_present)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged_user_can_edit_team(self, role):
"""
Scenario: The user with specified role should see the edit team button.
Given I am user with privileged role for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
"""
kwargs = {
'course_id': self.course_id,
'staff': False
}
if role is not None:
kwargs['roles'] = role
AutoAuthPage(self.browser, **kwargs).visit()
self.team_page.visit()
self.teams_page.wait_for_page()
self.assertTrue(self.team_page.edit_team_button_present)
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_page_viewed_event(self):
"""
Scenario: Visiting the edit team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the edit team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'edit-team',
'topic_id': self.topic['id'],
'team_id': self.team['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_edit_team_page()
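# --- Illustrative sketch (not part of the original file) --------------------
# test_staff_can_edit_team_successfully expects one edx.team.changed event per
# modified field, carrying the old and new values. A standalone model of that
# diff, with hypothetical before/after data:
def _team_changed_events(team_id, old, new):
    return [
        {'event_type': 'edx.team.changed',
         'event': {'team_id': team_id, 'field': field,
                   'old': old[field], 'new': new[field], 'truncated': []}}
        for field in sorted(old) if old[field] != new[field]
    ]

_change_events = _team_changed_events(
    'team-1', {'language': 'aa', 'name': 'X'}, {'language': 'en', 'name': 'X'})
assert len(_change_events) == 1 and _change_events[0]['event']['field'] == 'language'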
@attr(shard=17)
@ddt.ddt
class EditMembershipTest(TeamFormActions):
"""
    Tests for administering team membership from the edit membership page.
"""
def setUp(self):
super(EditMembershipTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
self.team = self.create_teams(self.topic, num_teams=1)[0]
        # Make sure a user exists on this team so we can edit the membership.
self.create_membership(self.user_info['username'], self.team['id'])
self.edit_membership_page = EditMembershipPage(self.browser, self.course_id, self.team)
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
def edit_membership_helper(self, role, cancel=False):
"""
Helper for common functionality in edit membership tests.
Checks for all relevant assertions about membership being removed,
including verify edx.team.learner_removed events are emitted.
"""
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.assertTrue(
self.team_management_page.membership_button_present
)
self.team_management_page.click_membership_button()
self.edit_membership_page.wait_for_page()
self.edit_membership_page.click_first_remove()
if cancel:
self.edit_membership_page.cancel_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 1)
else:
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'removed_by_admin',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
self.edit_membership_page.confirm_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 0)
self.edit_membership_page.wait_for_page()
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_remove_membership(self, role):
"""
Scenario: The user should be able to remove a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and confirm the dialog
Then my membership should be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=False)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_cancel_remove_membership(self, role):
"""
Scenario: The user should be able to remove a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and cancel the dialog
Then my membership should not be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=True)
@attr(shard=17)
@ddt.ddt
class TeamPageTest(TeamsTabBase):
"""Tests for viewing a specific team"""
SEND_INVITE_TEXT = 'Send this link to friends so that they can join too.'
def setUp(self):
super(TeamPageTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
def _set_team_configuration_and_membership(
self,
max_team_size=10,
membership_team_index=0,
visit_team_index=0,
create_membership=True,
another_user=False):
"""
Set team configuration.
Arguments:
max_team_size (int): number of users a team can have
membership_team_index (int): index of team user will join
visit_team_index (int): index of team user will visit
create_membership (bool): whether to create membership or not
            another_user (bool): whether to sign in as a different user before visiting the team
"""
#pylint: disable=attribute-defined-outside-init
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': max_team_size, 'topics': [self.topic]}
)
self.teams = self.create_teams(self.topic, 2)
if create_membership:
self.create_membership(self.user_info['username'], self.teams[membership_team_index]['id'])
if another_user:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page = TeamPage(self.browser, self.course_id, self.teams[visit_team_index])
def setup_thread(self):
"""
Create and return a thread for this test's discussion topic.
"""
thread = Thread(
id="test_thread_{}".format(uuid4().hex),
commentable_id=self.teams[0]['discussion_topic_id'],
body="Dummy text body.",
context="standalone",
)
thread_fixture = MultipleThreadFixture([thread])
thread_fixture.push()
return thread
def setup_discussion_user(self, role=None, staff=False):
"""Set this test's user to have the given role in its
discussions. Role is one of 'Community TA', 'Moderator',
'Administrator', or 'Student'.
"""
kwargs = {
'course_id': self.course_id,
'staff': staff
}
if role is not None:
kwargs['roles'] = role
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, **kwargs).visit().user_info
def verify_teams_discussion_permissions(self, should_have_permission):
"""Verify that the teams discussion component is in the correct state
for the test user. If `should_have_permission` is True, assert that
the user can see controls for posting replies, voting, editing, and
deleting. Otherwise, assert that those controls are hidden.
"""
thread = self.setup_thread()
self.team_page.visit()
self.assertEqual(self.team_page.discussion_id, self.teams[0]['discussion_topic_id'])
discussion_page = self.team_page.discussion_page
discussion_page.wait_for_page()
self.assertTrue(discussion_page.is_discussion_expanded())
self.assertEqual(discussion_page.get_num_displayed_threads(), 1)
discussion_page.show_thread(thread['id'])
thread_page = discussion_page.thread_page
assertion = self.assertTrue if should_have_permission else self.assertFalse
assertion(thread_page.q(css='.post-header-actions').present)
assertion(thread_page.q(css='.add-response').present)
def test_discussion_on_my_team_page(self):
"""
Scenario: Team Page renders a discussion for a team to which I belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the existing thread
And I should see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership()
self.verify_teams_discussion_permissions(True)
@ddt.data(True, False)
def test_discussion_on_other_team_page(self, is_staff):
"""
Scenario: Team Page renders a team discussion for a team to which I do
not belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the team's thread
And I should not see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(staff=is_staff)
self.verify_teams_discussion_permissions(False)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged(self, role):
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(role=role)
self.verify_teams_discussion_permissions(True)
def assert_team_details(self, num_members, is_member=True, max_size=10):
"""
Verifies that the user can see all the information present on the detail page according to their membership status.
Arguments:
num_members (int): number of users in a team
is_member (bool): True (default) if the requesting user is a member, else False
max_size (int): number of users a team can have
"""
self.assertEqual(
self.team_page.team_capacity_text,
self.team_page.format_capacity_text(num_members, max_size)
)
self.assertEqual(self.team_page.team_location, 'Afghanistan')
self.assertEqual(self.team_page.team_language, 'Afar')
self.assertEqual(self.team_page.team_members, num_members)
if num_members > 0:
self.assertTrue(self.team_page.team_members_present)
else:
self.assertFalse(self.team_page.team_members_present)
if is_member:
self.assertEqual(self.team_page.team_user_membership_text, 'You are a member of this team.')
self.assertTrue(self.team_page.team_leave_link_present)
self.assertTrue(self.team_page.new_post_button_present)
else:
self.assertEqual(self.team_page.team_user_membership_text, '')
self.assertFalse(self.team_page.team_leave_link_present)
self.assertFalse(self.team_page.new_post_button_present)
def test_team_member_can_see_full_team_details(self):
"""
Scenario: Team member can see full info for team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see the full team detail
And I should see the team members
And I should see my team membership text
And I should see the language & country
And I should see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assert_team_details(
num_members=1,
)
def test_other_users_can_see_limited_team_details(self):
"""
Scenario: Users who are not members of this team can only see limited info for this team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When I visit the Team page for that team
Then I should not see full team detail
And I should see the team members
And I should not see my team membership text
And I should not see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership(create_membership=False)
self.team_page.visit()
self.assert_team_details(is_member=False, num_members=0)
def test_user_can_navigate_to_members_profile_page(self):
"""
Scenario: User can navigate to profile page via team member profile image.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see profile images for the team members
When I click on the first profile image
Then I should be taken to the user's profile page
And I should see the username on profile page
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
learner_name = self.team_page.first_member_username
self.team_page.click_first_profile_image()
learner_profile_page = LearnerProfilePage(self.browser, learner_name)
learner_profile_page.wait_for_page()
learner_profile_page.wait_for_field('username')
self.assertTrue(learner_profile_page.field_is_visible('username'))
def test_join_team(self):
"""
Scenario: User can join a Team if not already a member.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I visit the Team page for that team
Then I should see Join Team button
And I should not see New Post button
When I click on Join Team button
Then there should be no Join Team button and no message
And an analytics event should be emitted
And I should see the updated information under Team Details
And I should see New Post button
And if I switch to "My Team", the team I have joined is displayed
"""
self._set_team_configuration_and_membership(create_membership=False)
teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
teams_page.visit()
teams_page.view_first_team()
self.assertTrue(self.team_page.join_team_button_present)
expected_events = [
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'joined_from_team_view'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_page.click_join_team_button()
self.assertFalse(self.team_page.join_team_button_present)
self.assertFalse(self.team_page.join_team_message_present)
self.assert_team_details(num_members=1, is_member=True)
# Verify that if one switches to "My Team" without reloading the page, the newly joined team is shown.
self.teams_page.click_all_topics()
self.verify_my_team_count(1)
def test_already_member_message(self):
"""
Scenario: User should see `You are already in a team` message if the user is a
member of another team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am already a member of a team
And I visit a team other than mine
Then I should see `You are already in a team` message
"""
self._set_team_configuration_and_membership(membership_team_index=0, visit_team_index=1)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'You already belong to another team.')
self.assert_team_details(num_members=0, is_member=False)
def test_team_full_message(self):
"""
Scenario: User should see `Team is full` message when team is full.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And team has no space left
And I am not a member of any team
And I visit the team
Then I should see `Team is full` message
"""
self._set_team_configuration_and_membership(
create_membership=True,
max_team_size=1,
membership_team_index=0,
visit_team_index=0,
another_user=True
)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'This team is full.')
self.assert_team_details(num_members=1, is_member=False, max_size=1)
def test_leave_team(self):
"""
Scenario: User can leave a team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am a member of the team
And I visit the team
And I should not see Join Team button
And I should see New Post button
Then I should see Leave Team link
When I click on Leave Team link
Then user should be removed from team
And an analytics event should be emitted
And I should see Join Team button
And I should not see New Post button
And if I switch to "My Team", the team I have left is not displayed
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assertFalse(self.team_page.join_team_button_present)
self.assert_team_details(num_members=1)
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'remove_method': 'self_removal'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
# I think we're seeing the same problem that we're seeing in
# CreateTeamTest.test_user_can_see_error_message_for_missing_data.
# We click on the "leave team" link after it's loaded, but before
# its JavaScript event handler is added. Adding this sleep gives
# enough time for that event handler to bind to the link. Sorry!
# For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_page.click_leave_team_link()
self.assert_team_details(num_members=0, is_member=False)
self.assertTrue(self.team_page.join_team_button_present)
# Verify that if one switches to "My Team" without reloading the page, the old team no longer shows.
self.teams_page.click_all_topics()
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the team profile page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the team profile page
Then my browser should post a page viewed event
"""
self._set_team_configuration_and_membership()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-team',
'topic_id': self.topic['id'],
'team_id': self.teams[0]['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.team_page.visit()
|
ESOedX/edx-platform
|
common/test/acceptance/tests/lms/test_teams.py
|
Python
|
agpl-3.0
| 84,355
|
[
"VisIt"
] |
d4f2739b4891409d86282af2eefe764705a6a184888a5de953fc723ad908b09a
|
import os
import sys
import re
import roblib
import gzip
'''Combine .gbff and .fna files to get just the coding sequences. We need to get the data from RefSeq, which
distributes the DNA sequences separately from the GenBank files, so it is not clear that Biopython etc. will work.
This is just a quick parser; we then extract the sequence strings.'''
try:
gbff = sys.argv[1]
fnaf = sys.argv[2]
except IndexError:
sys.stderr.write(sys.argv[0] + " <gbff file> <fna file>\n")
sys.exit(-1)
locusre = re.compile(r'LOCUS\s+(\S+)')
locustagre = re.compile(r'\s+/locus_tag="(.*)"')
locationre = re.compile(r'\s+gene\s+(\d+)\.\.(\d+)$')
locationrerc = re.compile(r'\s+gene\s+complement\((\d+)\.\.(\d+)\)$')
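# For reference, the kinds of GenBank lines these patterns are meant to match
# (illustrative examples, not taken from a specific record):
#   LOCUS       NC_000913            4641652 bp    DNA     circular CON
#                     /locus_tag="b0001"
#        gene            190..255
#        gene            complement(5683..6459)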
locus = ""
locustag = ""
[start, end]=['0','0']
complement = False
locations={}
try:
if gbff.endswith('.gz'):
gbfin=gzip.open(gbff, 'rb')
else:
gbfin=open(gbff, 'r')
except IOError:
sys.exit("Unable to open file " + gbff)
for line in gbfin:
line = line.rstrip()
if line == "//":
if start != '0' or end != '0':
# print "\t".join([locus, locustag, start, end, str(complement)])
locations[locus][locustag]=[start, end, complement]
locus = ""
locustag = ""
[start, end]=['0','0']
complement = False
continue
if line.startswith('LOCUS'):
m = locusre.match(line)
locus = m.group(1)
locations[locus]={}
continue
if '/locus_tag' in line:
m = locustagre.match(line)
if m:
locustag = m.group(1)
else:
sys.stderr.write("Couldn't parse |" + line + "|\n")
if '..' in line and 'gene' in line:
if start != '0' or end != '0':
# print "\t".join([locus, locustag, start, end, str(complement)])
locations[locus][locustag]=[start, end, complement]
locustag = ""
[start, end]=['0','0']
complement = False
m = locationre.match(line)
if m:
start = m.group(1)
end = m.group(2)
else:
m = locationrerc.match(line)
if m:
complement = True
start = m.group(1)
end = m.group(2)
else:
sys.stderr.write("Can't parse an apparent location at : " + line + "\n")
fa = roblib.readFasta(fnaf)
ncre = re.compile('.*ref\|(\w+)')
for id in fa:
m = ncre.match(id)
if not m:
sys.stderr.write("No apparent NC_ idenitifer in this sequence id: " + id + "\n")
continue
locus = m.group(1)
for l in locations[locus]:
[start, end, complement] = locations[locus][l]
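# GenBank gene coordinates are 1-based and inclusive, so the slices
# below run from start-1 to end.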
if complement:
print ">" + l + " " + locus + " " + end + "_" + start + " COMPLEMENT"
print roblib.rc(fa[id][int(start) - 1:int(end)])
else:
print ">" + l + " " + locus + " " + start + "_" + end
print fa[id][int(start)-1:int(end)]
|
linsalrob/EdwardsLab
|
ncbi/combine_gbff_fna.py
|
Python
|
mit
| 2,943
|
[
"Biopython"
] |
3a1266f7a43029e63a2e429406d60df7b9fd26fd87264eed9e5c787d208b7ab2
|
import pytest
from cplpy import run_test, prepare_config
import subprocess as sp
import os
import glob
import numpy as np
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def get_subprocess_error(e):
print("subprocess ERROR")
import json
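# Strip the leading 'error: ' prefix (7 characters) before parsing the JSON payload.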
error = json.loads(e[7:])
print(error['code'], error['message'])
MD_EXEC = "./lmp_cpl"
CFD_EXEC = "./CFD_single_ball.py"
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture(scope="module")
def build_case():
print("Building LAMMPS")
# Try to set up the code
with cd(TEST_DIR):
try:
build = sp.check_output("./build.sh", shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
return build
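# run_case is a module-scoped fixture parametrized over gravity values, so any
# test that requests it (e.g. test_gravity below) is executed once per value.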
@pytest.fixture(scope="module",
params=[2., 5., 9.81, 14.])
def run_case(request):
# Try to run the code
cmd = ('cplexec -m 1 "' + MD_EXEC + ' < single.in" ' + ' -c 1 "' + CFD_EXEC + " " + str(request.param) + ' "')
print("Running case " + cmd)
with cd(TEST_DIR):
try:
clean = sp.check_output("rm -f ./thermo_output* ./log.lammps* ./debug.vels",
shell=True)
run = sp.check_output(cmd, shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
return request
#@pytest.fixture(scope="module")
#def build_run():
# build = build_case()
# run = run_case()
def test_gravity(build_case, run_case):
# Check vs analytical solution for gravity
import falling
with cd(TEST_DIR):
error = falling.check_falling_error_vs_gravity(g=run_case.param)
for e in error:
assert np.abs(e) < 1e-14
|
Crompulence/cpl-library
|
test/lammps/single/no_wall/constant_force/test_falling.py
|
Python
|
gpl-3.0
| 2,107
|
[
"LAMMPS"
] |
edc27aebfba7f02a4a9cbd62ebafd8fd7b0ba655928481f347423f3dd16e8ba2
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""image generation with transformer (attention).
encoder: [Self-Attention, Feed-forward] x n
decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
@registry.register_model
class Imagetransformer(t2t_model.T2TModel):
"""Conditional image generation with attention. See file docstring.
The model admits either a Categorical or discretized mixture of logistic
distributions (DMOL) as the likelihood. When using DMOL for training, double
check that the evaluation metrics also use it.
"""
def body(self, features):
hparams = copy.copy(self._hparams)
targets = features["targets"]
if (hparams.likelihood == cia.DistributionType.DMOL and
(hparams.target_modality != "image:image_channel_bottom_identity" or
hparams.num_channels != 1)):
raise ValueError("When using DMOL for the likelihood, target_modality "
"must be image:image_channel_bottom_identity and "
"num_channels must be 1.")
if (not tf.get_variable_scope().reuse and
hparams.mode != tf.contrib.learn.ModeKeys.INFER and
hparams.target_modality != "image:image_channel_bottom_identity"):
tf.summary.image("targets", tf.to_float(targets), max_outputs=1)
# Extra losses list if we want to use moe.
losses = []
# Prepare decoder inputs and bias.
decoder_input, rows, cols = cia.prepare_decoder(targets, hparams)
# Add class label to decoder input.
if not hparams.unconditional:
inputs = features["inputs"]
decoder_input += tf.reshape(
inputs,
[common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size])
decoder_output = cia.transformer_decoder_layers(
decoder_input,
None,
hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams,
attention_type=hparams.dec_attention_type,
losses=losses,
name="decoder")
output = cia.create_output(decoder_output, rows, cols, targets, hparams)
if losses:
return output, {"extra_loss": tf.add_n(losses)}
else:
return output
def loss(self, logits, features):
if self._hparams.likelihood == cia.DistributionType.DMOL:
return common_layers.dml_loss(logits, features["targets"])
return super(Imagetransformer, self).loss(logits, features)
def sample(self, features):
"""Run the model and extract samples.
Args:
features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
if self._hparams.likelihood == cia.DistributionType.DMOL:
logits, losses = self(features) # pylint: disable=not-callable
samples = common_layers.sample_from_discretized_mix_logistic(
logits, seed=None)
return samples, logits, losses
return super(Imagetransformer, self).sample(features)
def _slow_greedy_infer(self, features, decode_length):
"""A slow greedy inference method.
Quadratic time in decode_length.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
samples: an integer `Tensor`.
logits: `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
losses: a dictionary: {loss-name (string): floating point `Scalar`}
"""
if self._hparams.likelihood == cia.DistributionType.DMOL:
raise NotImplementedError("Decoding is not currently available for DMOL.")
return super(Imagetransformer, self)._slow_greedy_infer(features,
decode_length)
@registry.register_model
class ImagetransformerMoe(t2t_model.T2TModel):
"""Conditional image generation with attention and MoE."""
@property
def use_body_sharded(self):
return True
def body_sharded(self, sharded_features):
dp = self._data_parallelism
hparams = copy.copy(self._hparams)
inputs = sharded_features["inputs"]
targets = sharded_features["targets"]
# Determine attention type and padding from hparams.
q_padding, kv_padding = "VALID", "VALID"
if hparams.q_filter_width > 1:
q_padding = "LEFT"
if hparams.kv_filter_width > 1:
kv_padding = "LEFT"
# Prepare decoder inputs and bias.
decoder_input, rows, cols = dp(cia.prepare_decoder_inputs,
inputs, targets, hparams)
# Run decoder.
# TODO(nikip): Use q_padding and kv_padding
del q_padding, kv_padding
decoder_output, extra_loss = cia.transformer_layers_sharded(
dp,
self._ps_devices,
decoder_input,
hparams.num_hidden_layers,
hparams,
self_attention_bias=None,
enc_output=None,
attention_type=hparams.dec_attention_type,
name="decoder")
output = dp(cia.create_output, decoder_output, rows, cols, targets, hparams)
return output, extra_loss
@registry.register_hparams
def image_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 4
hparams.max_length = 3075
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 0.2
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.target_modality = "image:identity"
hparams.norm_type = "layer"
hparams.layer_prepostprocess_dropout = 0.0
hparams.add_hparam("filter_size", 512) # Add new ones like this.
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("num_output_layers", 3)
hparams.add_hparam("block_size", 1)
# dilated attention based flags
hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])
# image size related flags
# assuming that the image has same height and width
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
# Local attention params
hparams.add_hparam("local_and_global_att", False)
hparams.add_hparam("block_length", 256)
hparams.add_hparam("block_width", 128)
hparams.add_hparam("num_encoder_layers", 4)
hparams.add_hparam("num_decoder_layers", 12)
hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
hparams.add_hparam("block_raster_scan", False)
# multipos attention params
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("likelihood", cia.DistributionType.CAT)
hparams.add_hparam("unconditional", False) # unconditional generation
# parameters of discretized mixture of logistics loss from pixel cnn++
hparams.add_hparam("num_mixtures", 10)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 8
hparams.moe_loss_coef = 1e-3
# These parameters are for relative attention
hparams.add_hparam("shared_rel", False) # share relative embeddings
return hparams
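# The registered hparams sets below are typically selected by name from the
# tensor2tensor trainer CLI; a hypothetical invocation (paths are placeholders):
#   t2t-trainer --model=imagetransformer --hparams_set=imagetransformer_cifar10_base \
#     --problem=image_cifar10 --data_dir=~/t2t_data --output_dir=~/t2t_train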
@registry.register_hparams
def imagetransformer_base():
hparams = image_transformer_base()
return hparams
@registry.register_hparams
def imagetransformer_cifar10_base():
"""Best config for 2.90 bits/dim on CIFAR10 using cross entropy."""
hparams = image_transformer_base()
hparams.batch_size = 4
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_cifar10_base_dmol():
"""Best config for 2.90 bits/dim on CIFAR10 using DMOL."""
hparams = image_transformer_base()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.target_modality = "image:image_channel_bottom_identity"
hparams.num_heads = 8
hparams.batch_size = 8
hparams.sampling_method = "random"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.summarize_grads = True
hparams.hidden_size = 256
hparams.filter_size = 512
hparams.attention_key_channels = 512
hparams.attention_value_channels = 512
hparams.num_decoder_layers = 12
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate = 0.1
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.pos = "emb"
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_base_tpu():
"""Transformer base params for cifar-10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 6000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_base_imagenet_tpu():
"""Transformer base params for cifar-10."""
hparams = imagetransformer_base_tpu()
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_imagenet32_base():
"""Best config for ImageNet-32 with 3.77 bits/dim using cross entropy."""
hparams = imagetransformer_cifar10_base()
hparams.batch_size = 4
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_rel():
"""Base with relative attention."""
hparams = imagetransformer_base()
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_hparams
def imagetransformer_sep_channels():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 512
hparams.num_hidden_layers = 6
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 256
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_multipos3():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.q_filter_width = 3
hparams.kv_filter_width = 3
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64():
"""big 1d model for unconditional generation on imagenet."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.unconditional = True
hparams.max_length = 14000
hparams.batch_size = 1
hparams.img_len = 64
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_sep_channels_8l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.target_modality = "image:image_channel_bottom_identity"
hparams.num_heads = 8
hparams.batch_size = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 512
hparams.filter_size = 512
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.summarize_grads = True
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformerpp_sep_channels_8l_8h()
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_prepostprocess_dropout = 0.3
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.summarize_grads = True
hparams.learning_rate = 0.01
return hparams
@registry.register_hparams
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a():
hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan()
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan():
hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a()
hparams.unconditional = True
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_a():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan()
hparams.learning_rate = 0.01
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan()
hparams.learning_rate = 0.1
hparams.hidden_size = 256
hparams.attention_key_channels = 512
hparams.attention_value_channels = 512
hparams.filter_size = 1024
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b()
hparams.filter_size = 512
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate = 0.1
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.pos = "emb"
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
hparams.num_decoder_layers = 12
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
hparams.num_decoder_layers = 12
hparams.clip_grad_norm = 40.
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k()
hparams.batch_size = 8
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k()
hparams.batch_size = 8
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_relsh():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel()
hparams.shared_rel = True
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p():
"""Gets to 2.92 in just under 4 days on 8 p100s."""
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
hparams.num_decoder_layers = 14
hparams.batch_size = 8
hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_bs1():
"""For 128x128."""
# TODO(trandustin): why are these running? max_length and img_len not set
# 256x256 was also training without setting max_length
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m()
hparams.batch_size = 1
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p_bs1():
"""For 128x128."""
hparams = imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p()
hparams.batch_size = 1
return hparams
@registry.register_hparams
def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1():
"""For 256x256."""
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
# TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in
# image transformer training implementation?
# hparams.img_len = 256
hparams.max_length = 66000 # allow for 256x256
hparams.batch_size = 1
hparams.num_decoder_layers = 5
hparams.hidden_size = 128
hparams.filter_size = 128
hparams.attention_key_channels = 64
hparams.attention_value_channels = 64
hparams.layer_prepostprocess_dropout = 0.0
return hparams
@registry.register_hparams
def imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor():
"""For 256x256."""
hparams = imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1()
# Use Adafactor which uses less memory than Adam, and its recommendations.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
return hparams
@registry.register_hparams
def imagetransformerpp_base_6l_8h_dr00_dan_g_bs1_adafactor():
"""For 256x256."""
hparams = imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor()
hparams.num_decoder_layers = 6
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_eval():
"""Gets to 2.92 in just under 4 days on 8 p100s."""
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
hparams.num_decoder_layers = 14
hparams.batch_size = 8
# hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_128():
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.block_width = 128
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_cond_dr03_dan():
"""Best conditional Cifar10 gen param."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan():
"""Best unconditional Cifar10 gen param."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0]
hparams.dec_attention_type = cia.AttentionType.DILATED
hparams.block_length = 128
hparams.block_width = 128
hparams.add_hparam("num_memory_blocks", 1)
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_b():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 64
hparams.num_memory_blocks = 2
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_c():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 32
hparams.num_memory_blocks = 4
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0]
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.filter_size = 1024
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.hidden_size = 512
hparams.learning_rate_warmup_steps = 4000
hparams.sampling_method = "random"
hparams.beam_size = 1
hparams.block_width = 256
return hparams
@registry.register_hparams
def imagetransformer1d_base_8l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 8
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer1d_base_12l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_dr01():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big()
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big_uncond()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_16h_imagenet_large():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 1
hparams.filter_size = 2048
hparams.num_heads = 16
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_sep_output_channels_8l_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.unconditional = False
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 512
hparams.filter_size = 512
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 256
hparams.num_hidden_layers = 4
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_bas8l_8h_big_uncond_dr03_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 8
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_tiny():
hparams = imagetransformer_base()
hparams.num_decoder_layers = 2
hparams.hidden_size = 64
hparams.batch_size = 1
hparams.unconditional = True
hparams.max_length = 66000 # allow for 256x256
return hparams
@registry.register_hparams
def imagetransformerpp_tiny():
hparams = imagetransformer_tiny()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.target_modality = "image:image_channel_bottom_identity"
return hparams
@registry.register_hparams
def imagetransformer_tiny_tpu():
hparams = imagetransformer_tiny()
update_hparams_for_tpu(hparams)
hparams.num_hidden_layers = 2
hparams.hidden_size = 16
hparams.batch_size = 2
hparams.num_heads = 2
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_moe_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_10l_16h_big_dr01_imgnet()
hparams.initializer = "orthogonal"
hparams.learning_rate_warmup_steps = 16000
hparams.add_hparam("moe_layers_decoder", "2,7") # Which layer is MoE.
hparams.moe_hidden_sizes = "4096" # Hidden layer sizes (comma-separated).
hparams.moe_num_experts = 64 # Number of experts in each MoE layer.
hparams.moe_k = 4 # How many experts to use per batch element (try 2 or 4).
hparams.moe_loss_coef = 3e-2 # MoE loss coefficient (1e-2 is usually ok).
hparams.scheduled_sampling_prob = 0.1
hparams.scheduled_sampling_warmup_steps = 200000
return hparams
@registry.register_hparams
def imagetransformer_moe_tiny():
"""Set of hyperparameters for a very small imagetransformer with MoE."""
hparams = imagetransformer_tiny()
hparams.hidden_size = 64
hparams.batch_size = 1
hparams.num_hidden_layers = 3
hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D
hparams.add_hparam("moe_layers_decoder", "1") # Which layer is MoE.
hparams.moe_hidden_sizes = "1024" # Hidden layer sizes (comma-separated).
hparams.moe_num_experts = 16 # Number of experts in each MoE layer.
hparams.moe_k = 2 # How many experts to use per batch element (try 2 or 4).
hparams.moe_loss_coef = 1e-2 # MoE loss coefficient (1e-2 is usually ok).
return hparams
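# Note: update_hparams_for_tpu mutates the passed hparams object in place and
# returns None; the TPU hparams sets below rely on that side effect.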
def update_hparams_for_tpu(hparams):
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 6000
hparams.batch_size = 4
@registry.register_hparams
def imagetransformer_sep_channels_8l_tpu():
"""Hparams for training imagetransformer on tpu."""
hparams = imagetransformer_sep_channels_8l()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.shared_embedding_and_softmax_weights = False
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr03_tpu():
"""Small model for tpu cifar 10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 1024
hparams.learning_rate = 0.2
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
return hparams
@registry.register_hparams
def imagetransformer_b10l_dr03_moe_tpu():
"""Moe tpu params."""
hparams = imagetransformer_b10l_4h_big_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.ffn_layer = "local_moe_tpu"
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr03_lr025_tpu():
"""TPU related small model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.learning_rate = 0.25
hparams.learning_rate_warmup_steps = 8000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
# hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_big_uncond_dr03_tpu():
"""TPU 12 layer model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 1024
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_big_uncond_dr03_lr025_tpu():
hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.learning_rate = 0.25
hparams.learning_rate_warmup_steps = 5000
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b256_uncond_dr03_tpu():
"""works very well on 4x4."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b256_uncond_dr03_rel_tpu():
"""works very well on 4x4."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
hparams.shared_rel = True
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_ranged_hparams
def imagetransformer_cifar_tpu_range(rhp):
"""Range of hyperparameters for vizier."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16])
rhp.set_discrete("hidden_size", [256, 512, 1024])
rhp.set_discrete("block_length", [128, 256, 512])
rhp.set_categorical("dec_attention_type", [
cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
@registry.register_hparams
def imagetransformer_b12l_4h_b128_h512_uncond_dr03_tpu():
"""TPU related big model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 6000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im():
"""TPU related imagenet model."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 6000
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_uncond_dr03_tpu():
"""TPU related small model."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu():
"""TPU config for cifar 10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 256
hparams.filter_size = 2048
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
return hparams
@registry.register_hparams
def imagetransformer_b12l_8h_b256_uncond_dr03_tpu():
"""TPU related 12 layer 8 heads model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 8 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr01_tpu():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 4
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams
|
vthorsteinsson/tensor2tensor
|
tensor2tensor/models/image_transformer.py
|
Python
|
apache-2.0
| 37,784
|
[
"MOE"
] |
9b702689ad62e7719ecea06d4efa0098da2ca1d97b6043068cdd80fd173d353a
|
# -*- coding: utf-8 -*-
"""
molvs.metal
~~~~~~~~~~~
This module contains tools for disconnecting metal atoms that are defined as covalently bonded to non-metals.
:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
import logging
from rdkit import Chem
log = logging.getLogger(__name__)
# TODO: This won't disconnect e.g. covalent [Na]Cl...
class MetalDisconnector(object):
"""Class for breaking covalent bonds between metals and organic atoms under certain conditions."""
def __init__(self):
log.debug('Initializing MetalDisconnector')
# Initialize SMARTS to identify relevant substructures
# TODO: Use atomic numbers instead of element symbols in SMARTS to allow for isotopes?
self._metal_nof = Chem.MolFromSmarts(
'[Li,Na,K,Rb,Cs,Fr,Be,Mg,Ca,Sr,Ba,Ra,Sc,Ti,V,Cr,Mn,Fe,Co,Ni,Cu,Zn,Al,Ga,Y,Zr,Nb,Mo,Tc,Ru,Rh,Pd,Ag,Cd,In,Sn,Hf,Ta,W,Re,Os,Ir,Pt,Au,Hg,Tl,Pb,Bi]~[N,O,F]')
self._metal_non = Chem.MolFromSmarts(
'[Al,Sc,Ti,V,Cr,Mn,Fe,Co,Ni,Cu,Zn,Y,Zr,Nb,Mo,Tc,Ru,Rh,Pd,Ag,Cd,Hf,Ta,W,Re,Os,Ir,Pt,Au]~[B,C,Si,P,As,Sb,S,Se,Te,Cl,Br,I,At]')
def __call__(self, mol):
"""Calling a MetalDisconnector instance like a function is the same as calling its disconnect(mol) method."""
return self.disconnect(mol)
def disconnect(self, mol):
"""Break covalent bonds between metals and organic atoms under certain conditions.
The algorithm works as follows:
- Disconnect N, O, F from any metal.
- Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).
- For every bond broken, adjust the charges of the begin and end atoms accordingly.
:param mol: The input molecule.
:type mol: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
:return: The molecule with metals disconnected.
:rtype: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
"""
log.debug('Running MetalDisconnector')
# Remove bonds that match SMARTS
metals = (self._metal_nof, self._metal_non)
for smarts in metals:
pairs = mol.GetSubstructMatches(smarts)
rwmol = Chem.RWMol(mol)
orders = []
for i, j in pairs:
# TODO: Could get the valence contributions of the bond instead of GetBondTypeAsDouble?
orders.append(int(mol.GetBondBetweenAtoms(i, j).GetBondTypeAsDouble()))
rwmol.RemoveBond(i, j)
# Adjust neighbouring charges accordingly
mol = rwmol.GetMol()
for n, (i, j) in enumerate(pairs):
chg = orders[n]
atom1 = mol.GetAtomWithIdx(i)
atom1.SetFormalCharge(atom1.GetFormalCharge() + chg)
atom2 = mol.GetAtomWithIdx(j)
atom2.SetFormalCharge(atom2.GetFormalCharge() - chg)
log.info(f'Removed covalent bond between {atom1.GetSymbol()} and {atom2.GetSymbol()}')
Chem.SanitizeMol(mol)
return mol
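# A minimal usage sketch (not part of the original module; the SMILES and the
# expected output are illustrative, assuming a standard RDKit build):
#
#   >>> md = MetalDisconnector()
#   >>> mol = Chem.MolFromSmiles('CC(=O)O[Na]')
#   >>> Chem.MolToSmiles(md.disconnect(mol))
#   'CC(=O)[O-].[Na+]'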
|
bp-kelley/rdkit
|
rdkit/Chem/MolStandardize/metal.py
|
Python
|
bsd-3-clause
| 3,082
|
[
"RDKit"
] |
9ef764f55473933a06109f990d9a8ee1946aaf930e64cbd56a9a1ab3d52ceb76
|
# pylint: disable=bad-continuation
"""
Certificate HTML webview.
"""
import logging
import urllib
from datetime import datetime
from uuid import uuid4
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.http import Http404, HttpResponse
from django.template import RequestContext
from django.utils.encoding import smart_str
from django.utils import translation
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from badges.events.course_complete import get_completion_badge
from badges.utils import badges_enabled
from lms.djangoapps.certificates.api import (
emit_certificate_event,
get_active_web_certificate,
get_certificate_footer_context,
get_certificate_header_context,
get_certificate_template,
get_certificate_url
)
from lms.djangoapps.certificates.models import (
CertificateGenerationCourseSetting,
CertificateHtmlViewConfiguration,
CertificateSocialNetworks,
CertificateStatuses,
GeneratedCertificate
)
from courseware.access import has_access
from courseware.courses import get_course_by_id
from edxmako.shortcuts import render_to_response
from edxmako.template import Template
from openedx.core.djangoapps.catalog.utils import get_course_run_details
from openedx.core.djangoapps.lang_pref.api import get_closest_released_language
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.courses import course_image_url
from openedx.core.djangoapps.certificates.api import display_date_for_certificate, certificates_viewable_for_course
from student.models import LinkedInAddToProfileConfiguration
from util import organizations_helpers as organization_api
from util.date_utils import strftime_localized
from util.views import handle_500
log = logging.getLogger(__name__)
_ = translation.ugettext
INVALID_CERTIFICATE_TEMPLATE_PATH = 'certificates/invalid.html'
def get_certificate_description(mode, certificate_type, platform_name):
"""
:return: certificate_type_description based on the current mode
"""
certificate_type_description = None
if mode == 'honor':
# Translators: This text describes the 'Honor' course certificate type.
certificate_type_description = _("An {cert_type} certificate signifies that a "
"learner has agreed to abide by the honor code established by {platform_name} "
"and has completed all of the required tasks for this course under its "
"guidelines.").format(cert_type=certificate_type,
platform_name=platform_name)
elif mode == 'verified':
# Translators: This text describes the 'ID Verified' course certificate type, which is a higher level of
# verification offered by edX. This type of verification is useful for professional education/certifications
certificate_type_description = _("A {cert_type} certificate signifies that a "
"learner has agreed to abide by the honor code established by {platform_name} "
"and has completed all of the required tasks for this course under its "
"guidelines. A {cert_type} certificate also indicates that the "
"identity of the learner has been checked and "
"is valid.").format(cert_type=certificate_type,
platform_name=platform_name)
elif mode == 'xseries':
# Translators: This text describes the 'XSeries' course certificate type. An XSeries is a collection of
# courses related to each other in a meaningful way, such as a specific topic or theme, or even an organization
certificate_type_description = _("An {cert_type} certificate demonstrates a high level of "
"achievement in a program of study, and includes verification of "
"the student's identity.").format(cert_type=certificate_type)
return certificate_type_description
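# Illustrative usage (values are hypothetical, not from the source):
# get_certificate_description('honor', 'Honor Code', 'edX') would return the
# honor-code description with cert_type='Honor Code' and platform_name='edX'
# substituted; an unrecognized mode falls through and None is returned.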
def _update_certificate_context(context, course, user_certificate, platform_name):
"""
Build up the certificate web view context using the provided values
(Helper method to keep the view clean)
"""
# Populate dynamic output values using the course/certificate data loaded above
certificate_type = context.get('certificate_type')
# Override the defaults with any mode-specific static values
context['certificate_id_number'] = user_certificate.verify_uuid
context['certificate_verify_url'] = "{prefix}{uuid}{suffix}".format(
prefix=context.get('certificate_verify_url_prefix'),
uuid=user_certificate.verify_uuid,
suffix=context.get('certificate_verify_url_suffix')
)
# Translators: The format of the date includes the full name of the month
date = display_date_for_certificate(course, user_certificate)
context['certificate_date_issued'] = _('{month} {day}, {year}').format(
month=strftime_localized(date, "%B"),
day=date.day,
year=date.year
)
# Translators: This text represents the verification of the certificate
context['document_meta_description'] = _('This is a valid {platform_name} certificate for {user_name}, '
'who participated in {partner_short_name} {course_number}').format(
platform_name=platform_name,
user_name=context['accomplishment_copy_name'],
partner_short_name=context['organization_short_name'],
course_number=context['course_number']
)
# Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar
context['document_title'] = _("{partner_short_name} {course_number} Certificate | {platform_name}").format(
partner_short_name=context['organization_short_name'],
course_number=context['course_number'],
platform_name=platform_name
)
# Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate
# screen. The text describes the accomplishment represented by the certificate information displayed to the user
context['accomplishment_copy_description_full'] = _("successfully completed, received a passing grade, and was "
"awarded this {platform_name} "
"Certificate of Completion in ").format(
platform_name=platform_name)
certificate_type_description = get_certificate_description(user_certificate.mode, certificate_type, platform_name)
if certificate_type_description:
context['certificate_type_description'] = certificate_type_description
# Translators: This text describes the purpose (and therefore, value) of a course certificate
context['certificate_info_description'] = _("{platform_name} acknowledges achievements through "
"certificates, which are awarded for course activities "
"that {platform_name} students complete.").format(
platform_name=platform_name,
tos_url=context.get('company_tos_url'),
verified_cert_url=context.get('company_verified_certificate_url'))
def _update_context_with_basic_info(context, course_id, platform_name, configuration):
"""
Updates context dictionary with basic info required before rendering simplest
certificate templates.
"""
# Update the view context with the default ConfigurationModel settings
context.update(configuration.get('default', {}))
context['platform_name'] = platform_name
context['course_id'] = course_id
# Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content
reserved = _("All rights reserved")
context['copyright_text'] = u'© {year} {platform_name}. {reserved}.'.format(
year=datetime.now(pytz.timezone(settings.TIME_ZONE)).year,
platform_name=platform_name,
reserved=reserved
)
# Translators: This text is bound to the HTML 'title' element of the page and appears
# in the browser title bar when a requested certificate is not found or recognized
context['document_title'] = _("Invalid Certificate")
context['company_tos_urltext'] = _("Terms of Service & Honor Code")
# Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information
context['company_privacy_urltext'] = _("Privacy Policy")
# Translators: This line appears as a byline to a header image and describes the purpose of the page
context['logo_subtitle'] = _("Certificate Validation")
# Translators: Accomplishments describe the awards/certifications obtained by students on this platform
context['accomplishment_copy_about'] = _('About {platform_name} Accomplishments').format(
platform_name=platform_name
)
# Translators: This line appears on the page just before the generation date for the certificate
context['certificate_date_issued_title'] = _("Issued On:")
# Translators: The Certificate ID Number is an alphanumeric value unique to each individual certificate
context['certificate_id_number_title'] = _('Certificate ID Number')
context['certificate_info_title'] = _('About {platform_name} Certificates').format(
platform_name=platform_name
)
context['certificate_verify_title'] = _("How {platform_name} Validates Student Certificates").format(
platform_name=platform_name
)
# Translators: This text describes the validation mechanism for a certificate file (known as GPG security)
context['certificate_verify_description'] = _('Certificates issued by {platform_name} are signed by a gpg key so '
'that they can be validated independently by anyone with the '
'{platform_name} public key. For independent verification, '
'{platform_name} uses what is called a '
'"detached signature""".').format(platform_name=platform_name)
context['certificate_verify_urltext'] = _("Validate this certificate for yourself")
# Translators: This text describes (at a high level) the mission and charter of the edX platform and organization
context['company_about_description'] = _("{platform_name} offers interactive online classes and MOOCs.").format(
platform_name=platform_name)
context['company_about_title'] = _("About {platform_name}").format(platform_name=platform_name)
context['company_about_urltext'] = _("Learn more about {platform_name}").format(platform_name=platform_name)
context['company_courselist_urltext'] = _("Learn with {platform_name}").format(platform_name=platform_name)
context['company_careers_urltext'] = _("Work at {platform_name}").format(platform_name=platform_name)
context['company_contact_urltext'] = _("Contact {platform_name}").format(platform_name=platform_name)
# Translators: This text appears near the top of the certificate and describes the guarantee provided by edX
context['document_banner'] = _("{platform_name} acknowledges the following student accomplishment").format(
platform_name=platform_name
)
def _update_course_context(request, context, course, course_key, platform_name):
"""
Updates context dictionary with course info.
"""
context['full_course_image_url'] = request.build_absolute_uri(course_image_url(course))
course_title_from_cert = context['certificate_data'].get('course_title', '')
accomplishment_copy_course_name = course_title_from_cert if course_title_from_cert else course.display_name
context['accomplishment_copy_course_name'] = accomplishment_copy_course_name
course_number = course.display_coursenumber if course.display_coursenumber else course.number
context['course_number'] = course_number
if context['organization_long_name']:
# Translators: This text represents the description of course
context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_long_name}.').format(
partner_long_name=context['organization_long_name'],
platform_name=platform_name)
else:
# Translators: This text represents the description of course
context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_short_name}.').format(
partner_short_name=context['organization_short_name'],
platform_name=platform_name)
def _update_social_context(request, context, course, user, user_certificate, platform_name):
"""
Updates context dictionary with info required for social sharing.
"""
share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS)
context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False)
context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID)
context['facebook_share_text'] = share_settings.get(
'CERTIFICATE_FACEBOOK_TEXT',
_("I completed the {course_title} course on {platform_name}.").format(
course_title=context['accomplishment_copy_course_name'],
platform_name=platform_name
)
)
context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False)
context['twitter_share_text'] = share_settings.get(
'CERTIFICATE_TWITTER_TEXT',
_("I completed a course at {platform_name}. Take a look at my certificate.").format(
platform_name=platform_name
)
)
share_url = request.build_absolute_uri(get_certificate_url(course_id=course.id, uuid=user_certificate.verify_uuid))
context['share_url'] = share_url
twitter_url = ''
if context.get('twitter_share_enabled', False):
twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format(
twitter_share_text=smart_str(context['twitter_share_text']),
share_url=urllib.quote_plus(smart_str(share_url))
)
context['twitter_url'] = twitter_url
context['linked_in_url'] = None
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
linkedin_share_enabled = share_settings.get('CERTIFICATE_LINKEDIN', linkedin_config.enabled)
if linkedin_share_enabled:
context['linked_in_url'] = linkedin_config.add_to_profile_url(
course.id,
course.display_name,
user_certificate.mode,
smart_str(share_url)
)
def _update_context_with_user_info(context, user, user_certificate):
"""
Updates context dictionary with user related info.
"""
user_fullname = user.profile.name
context['username'] = user.username
context['course_mode'] = user_certificate.mode
context['accomplishment_user_id'] = user.id
context['accomplishment_copy_name'] = user_fullname
context['accomplishment_copy_username'] = user.username
context['accomplishment_more_title'] = _("More Information About {user_name}'s Certificate:").format(
user_name=user_fullname
)
# Translators: This line is displayed to a user who has completed a course and achieved a certification
context['accomplishment_banner_opening'] = _("{fullname}, you earned a certificate!").format(
fullname=user_fullname
)
# Translators: This line congratulates the user and instructs them to share their accomplishment on social networks
context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what "
"you accomplished. Show it off to family, friends, and colleagues "
"in your social and professional networks.")
# Translators: This line leads the reader to understand more about the certificate that a student has been awarded
context['accomplishment_copy_more_about'] = _("More about {fullname}'s accomplishment").format(
fullname=user_fullname
)
def _get_user_certificate(request, user, course_key, course, preview_mode=None):
"""
Retrieves the user's certificate from the db, creating one in preview mode.
Returns None if no certificate has been generated for the given user;
otherwise returns a `GeneratedCertificate` instance.
"""
user_certificate = None
if preview_mode:
# certificate is being previewed from studio
if has_access(request.user, 'instructor', course) or has_access(request.user, 'staff', course):
if course.certificate_available_date and not course.self_paced:
modified_date = course.certificate_available_date
else:
modified_date = datetime.now().date()
user_certificate = GeneratedCertificate(
mode=preview_mode,
verify_uuid=unicode(uuid4().hex),
modified_date=modified_date
)
elif certificates_viewable_for_course(course):
# certificate is being viewed by learner or public
try:
user_certificate = GeneratedCertificate.eligible_certificates.get(
user=user,
course_id=course_key,
status=CertificateStatuses.downloadable
)
except GeneratedCertificate.DoesNotExist:
pass
return user_certificate
def _track_certificate_events(request, context, course, user, user_certificate):
"""
Tracks web certificate view related events.
"""
# Badge Request Event Tracking Logic
course_key = course.location.course_key
if 'evidence_visit' in request.GET:
badge_class = get_completion_badge(course_key, user)
if not badge_class:
log.warning('Visit to evidence URL for badge, but badges not configured for course "%s"', course_key)
badges = []
else:
badges = badge_class.get_for_user(user)
if badges:
# There should only ever be one of these.
badge = badges[0]
tracker.emit(
'edx.badge.assertion.evidence_visited',
{
'badge_name': badge.badge_class.display_name,
'badge_slug': badge.badge_class.slug,
'badge_generator': badge.backend,
'issuing_component': badge.badge_class.issuing_component,
'user_id': user.id,
'course_id': unicode(course_key),
'enrollment_mode': badge.badge_class.mode,
'assertion_id': badge.id,
'assertion_image_url': badge.image_url,
'assertion_json_url': badge.assertion_url,
'issuer': badge.data.get('issuer'),
}
)
else:
log.warning(
"Could not find badge for %s on course %s.",
user.id,
course_key,
)
# track certificate evidence_visited event for analytics when certificate_user and accessing_user are different
if request.user and request.user.id != user.id:
emit_certificate_event('evidence_visited', user, unicode(course.id), course, {
'certificate_id': user_certificate.verify_uuid,
'enrollment_mode': user_certificate.mode,
'social_network': CertificateSocialNetworks.linkedin
})
def _update_configuration_context(context, configuration):
"""
Site Configuration will need to be able to override any hard coded
content that was put into the context in the
_update_certificate_context() call above. For example the
'company_about_description' talks about edX, which we most likely
do not want to keep in configurations.
So we need to re-apply any configuration/content that
we are sourcing from the database. This is somewhat duplicative of
the code at the beginning of this method, but we
need the configuration at the top as some error code paths
require that to be set up early on in the pipeline
"""
config_key = configuration_helpers.get_value('domain_prefix')
config = configuration.get("microsites", {})
if config_key and config:
context.update(config.get(config_key, {}))
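# A minimal sketch of the configuration shape this helper expects; the keys
# and site name below are hypothetical, not taken from the source:
# {
#     "default": {"company_about_description": "..."},
#     "honor": {...},  # mode-specific overrides applied elsewhere
#     "microsites": {
#         "mysite": {"company_about_description": "Site-specific copy"}
#     }
# }
# With domain_prefix == "mysite", the "mysite" block overrides the context.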
def _update_badge_context(context, course, user, preview_mode=None):
"""
Updates context with badge info.
"""
badges = []
if badges_enabled() and course.issue_badges:
badges = get_completion_badge(
course.location.course_key,
user,
preview_mode
).get_for_user(user)
context['badges'] = badges
def _update_organization_context(context, course):
"""
Updates context with organization related info.
"""
partner_long_name, organization_logo = None, None
partner_short_name = course.display_organization if course.display_organization else course.org
organizations = organization_api.get_course_organizations(course_id=course.id)
if organizations:
# TODO: Add support for multiple organizations; currently we use only the first one.
organization = organizations[0]
partner_long_name = organization.get('name', partner_long_name)
partner_short_name = organization.get('short_name', partner_short_name)
organization_logo = organization.get('logo', None)
context['organization_long_name'] = partner_long_name
context['organization_short_name'] = partner_short_name
context['accomplishment_copy_course_org'] = partner_short_name
context['organization_logo'] = organization_logo
def render_cert_by_uuid(request, certificate_uuid):
"""
This public view generates an HTML representation of the specified certificate
"""
try:
certificate = GeneratedCertificate.eligible_certificates.get(
verify_uuid=certificate_uuid,
status=CertificateStatuses.downloadable
)
return render_html_view(request, certificate.user.id, unicode(certificate.course_id))
except GeneratedCertificate.DoesNotExist:
raise Http404
@handle_500(
template_path="certificates/server-error.html",
test_func=lambda request: request.GET.get('preview', None)
)
def render_html_view(request, user_id, course_id):
"""
This public view generates an HTML representation of the specified user and course.
If a certificate is not available, we display a "Sorry!" screen instead.
"""
try:
user_id = int(user_id)
except ValueError:
raise Http404
preview_mode = request.GET.get('preview', None)
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
configuration = CertificateHtmlViewConfiguration.get_config()
# Kick the user back to the "Invalid" screen if the feature is disabled globally
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return _render_invalid_certificate(course_id, platform_name, configuration)
# Load the course and user objects
try:
course_key = CourseKey.from_string(course_id)
user = User.objects.get(id=user_id)
course = get_course_by_id(course_key)
# For any course or user exceptions, kick the user back to the "Invalid" screen
except (InvalidKeyError, User.DoesNotExist, Http404) as exception:
error_str = (
"Invalid cert: error finding course %s or user with id "
"%d. Specific error: %s"
)
log.info(error_str, course_id, user_id, str(exception))
return _render_invalid_certificate(course_id, platform_name, configuration)
# Kick the user back to the "Invalid" screen if the feature is disabled for the course
if not course.cert_html_view_enabled:
log.info(
"Invalid cert: HTML certificates disabled for %s. User id: %d",
course_id,
user_id,
)
return _render_invalid_certificate(course_id, platform_name, configuration)
# Load user's certificate
user_certificate = _get_user_certificate(request, user, course_key, course, preview_mode)
if not user_certificate:
log.info(
"Invalid cert: User %d does not have eligible cert for %s.",
user_id,
course_id,
)
return _render_invalid_certificate(course_id, platform_name, configuration)
# Get the active certificate configuration for this course
# If we do not have an active certificate, we'll need to send the user to the "Invalid" screen
# Passing in the 'preview' parameter, if specified, will return a configuration, if defined
active_configuration = get_active_web_certificate(course, preview_mode)
if active_configuration is None:
log.info(
"Invalid cert: course %s does not have an active configuration. User id: %d",
course_id,
user_id,
)
return _render_invalid_certificate(course_id, platform_name, configuration)
# Get data from Discovery service that will be necessary for rendering this Certificate.
catalog_data = _get_catalog_data_for_course(course_key)
# Determine whether to use the standard or custom template to render the certificate.
custom_template = None
custom_template_language = None
if settings.FEATURES.get('CUSTOM_CERTIFICATE_TEMPLATES_ENABLED', False):
custom_template, custom_template_language = _get_custom_template_and_language(
course.id,
user_certificate.mode,
catalog_data.pop('content_language', None)
)
# Determine the language that should be used to render the certificate.
# For the standard certificate template, use the user language. For custom templates, use
# the language associated with the template.
user_language = translation.get_language()
certificate_language = custom_template_language if custom_template else user_language
# Generate the certificate context in the correct language, then render the template.
with translation.override(certificate_language):
context = {'user_language': user_language}
_update_context_with_basic_info(context, course_id, platform_name, configuration)
context['certificate_data'] = active_configuration
# Append/Override the existing view context values with any mode-specific ConfigurationModel values
context.update(configuration.get(user_certificate.mode, {}))
# Append organization info
_update_organization_context(context, course)
# Append course info
_update_course_context(request, context, course, course_key, platform_name)
# Append course run info from discovery
context.update(catalog_data)
# Append badge info
_update_badge_context(context, course, user, preview_mode)
# Append user info
_update_context_with_user_info(context, user, user_certificate)
# Append social sharing info
_update_social_context(request, context, course, user, user_certificate, platform_name)
# Append/Override the existing view context values with certificate specific values
_update_certificate_context(context, course, user_certificate, platform_name)
# Append site configuration overrides
_update_configuration_context(context, configuration)
# Add certificate header/footer data to current context
context.update(get_certificate_header_context(is_secure=request.is_secure()))
context.update(get_certificate_footer_context())
# Append/Override the existing view context values with any course-specific static values from Advanced Settings
context.update(course.cert_html_view_overrides)
# Track certificate view events
_track_certificate_events(request, context, course, user, user_certificate)
# Render the certificate
return _render_valid_certificate(request, context, custom_template)
def _get_catalog_data_for_course(course_key):
"""
Retrieve data from the Discovery service necessary for rendering a certificate for a specific course.
"""
course_certificate_settings = CertificateGenerationCourseSetting.get(course_key)
if not course_certificate_settings:
return {}
catalog_data = {}
course_run_fields = []
if course_certificate_settings.language_specific_templates_enabled:
course_run_fields.append('content_language')
if course_certificate_settings.include_hours_of_effort:
course_run_fields.extend(['weeks_to_complete', 'max_effort'])
if course_run_fields:
course_run_data = get_course_run_details(course_key, course_run_fields)
if course_run_data.get('weeks_to_complete') and course_run_data.get('max_effort'):
try:
weeks_to_complete = int(course_run_data['weeks_to_complete'])
max_effort = int(course_run_data['max_effort'])
catalog_data['hours_of_effort'] = weeks_to_complete * max_effort
except ValueError:
log.exception('Error occurred while parsing course run details')
catalog_data['content_language'] = course_run_data.get('content_language')
return catalog_data
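# Worked example (hypothetical values): a course run reporting
# weeks_to_complete='8' and max_effort='5' yields
# catalog_data['hours_of_effort'] == 8 * 5 == 40.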
def _get_custom_template_and_language(course_id, course_mode, course_language):
"""
Return the custom certificate template, if any, that should be rendered for the provided course/mode/language
combination, along with the language that should be used to render that template.
"""
closest_released_language = get_closest_released_language(course_language) if course_language else None
template = get_certificate_template(course_id, course_mode, closest_released_language)
if template and template.language:
return (template, closest_released_language)
elif template:
return (template, settings.LANGUAGE_CODE)
else:
return (None, None)
def _render_invalid_certificate(course_id, platform_name, configuration):
context = {}
_update_context_with_basic_info(context, course_id, platform_name, configuration)
return render_to_response(INVALID_CERTIFICATE_TEMPLATE_PATH, context)
def _render_valid_certificate(request, context, custom_template=None):
if custom_template:
template = Template(
custom_template.template,
output_encoding='utf-8',
input_encoding='utf-8',
default_filters=['decode.utf8'],
encoding_errors='replace',
)
context = RequestContext(request, context)
return HttpResponse(template.render(context))
else:
return render_to_response("certificates/valid.html", context)
|
proversity-org/edx-platform
|
lms/djangoapps/certificates/views/webview.py
|
Python
|
agpl-3.0
| 31,811
|
[
"VisIt"
] |
2a77213e37290b96261f9694438e60f4d220c6d7027584b3fb9d6050bf5675bd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extract sequences from a fasta file and group them in output files.
Groups of 'number' sequences are formed.
Output file names will begin with 'stub'.
Usage:
%program <input_file> <number> <stub>"""
import sys
try:
from Bio import SeqIO
except ImportError:
print("This program requires the Biopython library")
sys.exit(0)
try:
fasta_file = sys.argv[1] # Input fasta file
nb_sequences = int(sys.argv[2]) # Number of sequences per group
result_file = sys.argv[3] # Output fasta filename stub eg: my_genes
except (IndexError, ValueError):
print(__doc__)
sys.exit(0)
fasta_sequences = SeqIO.parse(open(fasta_file),'fasta')
group_count = 0
total_seq = 0
while True:
group_count += 1
with open(result_file + "%04i.fasta" % group_count, "w") as f:
for i in range(nb_sequences):
try:
seq = next(fasta_sequences)
total_seq += 1
except StopIteration:
print("All sequences treated")
if total_seq % nb_sequences != 0:
print("WARNING: Number of sequences not a multiple of %i"\)
% nb_sequences
sys.exit(0)
f.write(">" + seq.name + "\n" + str(seq.seq) + "\n")
|
enormandeau/Scripts
|
fasta_extract_group.py
|
Python
|
gpl-3.0
| 1,332
|
[
"Biopython"
] |
dfb2697987404134a2a3762fc4f0567bf04a42554893dabd3c038a65bffb771b
|
from setuptools import setup
setup(
name = 'tracermppt',
py_modules = ['tracermppt'],
version = '0.3',
description = 'Interface for controlling and interrogating the '
'Tracer-2210RN and similar charge controllers via the remote '
'monitoring port',
author = 'Brian Mayton',
author_email = 'bmayton@media.mit.edu',
license = 'MIT',
url = 'https://github.com/bmayton/tracermppt',
download_url = 'https://github.com/bmayton/tracermppt/tarball/0.3',
keywords = [],
classifiers = [],
install_requires = [
"enum34",
"pyserial",
]
)
|
bmayton/tracermppt
|
setup.py
|
Python
|
mit
| 612
|
[
"Brian"
] |
a68eb2554c7e8e47fe6a8610fdca6789c8f1a3f1fca8d3439ec02861be21c202
|
"""A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
import shutil
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd
from IPython.utils import tz
from IPython.html.utils import is_hidden, to_os_path
def sort_key(item):
"""Case-insensitive sorting."""
return item['name'].lower()
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
save_script = Bool(False, config=True,
help="""Automatically create a Python script when saving the notebook.
For easier use of import, %run and %load across notebooks, a
<notebook-name>.py script will be created next to any
<notebook-name>.ipynb on each save. This can also be set with the
short `--script` flag.
"""
)
notebook_dir = Unicode(getcwd(), config=True)
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.notebook_dir = os.path.abspath(new)
return
if not os.path.exists(new) or not os.path.isdir(new):
raise TraitError("notebook dir %r is not a directory" % new)
checkpoint_dir = Unicode(config=True,
help="""The location in which to keep notebook checkpoints
By default, it is notebook-dir/.ipynb_checkpoints
"""
)
def _checkpoint_dir_default(self):
return os.path.join(self.notebook_dir, '.ipynb_checkpoints')
def _checkpoint_dir_changed(self, name, old, new):
"""do a bit of validation of the checkpoint dir"""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
abs_new = os.path.abspath(new)
self.checkpoint_dir = abs_new
return
if os.path.exists(new) and not os.path.isdir(new):
raise TraitError("checkpoint dir %r is not a directory" % new)
if not os.path.exists(new):
self.log.info("Creating checkpoint dir %s", new)
try:
os.mkdir(new)
except OSError:
raise TraitError("Couldn't create checkpoint dir %r" % new)
def _copy(self, src, dest):
"""copy src to dest
like shutil.copy2, but log errors in copystat
"""
shutil.copyfile(src, dest)
try:
shutil.copystat(src, dest)
except OSError:
self.log.debug("copystat on %s failed", dest, exc_info=True)
def get_notebook_names(self, path=''):
"""List all notebook names in the notebook dir and path."""
path = path.strip('/')
if not os.path.isdir(self._get_os_path(path=path)):
raise web.HTTPError(404, 'Directory not found: ' + path)
names = glob.glob(self._get_os_path('*'+self.filename_ext, path))
names = [os.path.basename(name)
for name in names]
return names
def path_exists(self, path):
"""Does the API-style path (directory) actually exist?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.notebook_dir)
def _get_os_path(self, name=None, path=''):
"""Given a notebook name and a URL path, return its file system
path.
Parameters
----------
name : string
The name of a notebook file with the .ipynb extension
path : string
The relative URL path (with '/' as separator) to the named
notebook.
Returns
-------
path : string
A file system path that combines notebook_dir (location where
server started), the relative path, and the filename, using the
current operating system's path separator.
"""
if name is not None:
path = path + '/' + name
return to_os_path(path, self.notebook_dir)
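# Example (hypothetical values): with notebook_dir '/home/u/nb',
# _get_os_path('foo.ipynb', 'sub/dir') returns the OS-native equivalent of
# '/home/u/nb/sub/dir/foo.ipynb'.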
def notebook_exists(self, name, path=''):
"""Returns a True if the notebook exists. Else, returns False.
Parameters
----------
name : string
The name of the notebook you are checking.
path : string
The relative path to the notebook (with '/' as separator)
Returns
-------
bool
"""
path = path.strip('/')
nbpath = self._get_os_path(name, path=path)
return os.path.isfile(nbpath)
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def list_dirs(self, path):
"""List the directories for a given API style path."""
path = path.strip('/')
os_path = self._get_os_path('', path)
if not os.path.isdir(os_path):
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
elif is_hidden(os_path, self.notebook_dir):
self.log.info("Refusing to serve hidden directory, via 404 Error")
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
dir_names = os.listdir(os_path)
dirs = []
for name in dir_names:
os_path = self._get_os_path(name, path)
if os.path.isdir(os_path) and not is_hidden(os_path, self.notebook_dir)\
and self.should_list(name):
try:
model = self.get_dir_model(name, path)
except IOError:
# Directory disappeared or is unreadable; skip it.
continue
dirs.append(model)
dirs = sorted(dirs, key=sort_key)
return dirs
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def get_dir_model(self, name, path=''):
"""Get the directory model given a directory name and its API style path"""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isdir(os_path):
raise IOError('directory does not exist: %r' % os_path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'directory'
return model
def list_notebooks(self, path):
"""Returns a list of dictionaries that are the standard model
for all notebooks in the relative 'path'.
Parameters
----------
path : str
the URL path that describes the relative path for the
listed notebooks
Returns
-------
notebooks : list of dicts
a list of the notebook models without 'content'
"""
path = path.strip('/')
notebook_names = self.get_notebook_names(path)
notebooks = [self.get_notebook(name, path, content=False)
for name in notebook_names if self.should_list(name)]
notebooks = sorted(notebooks, key=sort_key)
return notebooks
def get_notebook(self, name, path='', content=True):
""" Takes a path and name for a notebook and returns its model
Parameters
----------
name : str
the name of the notebook
path : str
the URL path that describes the relative path for
the notebook
Returns
-------
model : dict
the notebook model. If content=True, the model also includes
the 'content' dict.
"""
path = path.strip('/')
if not self.notebook_exists(name=name, path=path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % name)
os_path = self._get_os_path(name, path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'notebook'
if content:
with io.open(os_path, 'r', encoding='utf-8') as f:
try:
nb = current.read(f, u'json')
except Exception as e:
raise web.HTTPError(400, u"Unreadable Notebook: %s %s" % (os_path, e))
self.mark_trusted_cells(nb, name, path)
model['content'] = nb
return model
def save_notebook(self, model, name='', path=''):
"""Save the notebook model and return the model with no content."""
path = path.strip('/')
if 'content' not in model:
raise web.HTTPError(400, u'No notebook JSON data provided')
# One checkpoint should always exist
if self.notebook_exists(name, path) and not self.list_checkpoints(name, path):
self.create_checkpoint(name, path)
new_path = model.get('path', path).strip('/')
new_name = model.get('name', name)
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
# Save the notebook file
os_path = self._get_os_path(new_name, new_path)
nb = current.to_notebook_json(model['content'])
self.check_and_sign(nb, new_name, new_path)
if 'name' in nb['metadata']:
nb['metadata']['name'] = u''
try:
self.log.debug("Autosaving notebook %s", os_path)
with io.open(os_path, 'w', encoding='utf-8') as f:
current.write(nb, f, u'json')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s %s' % (os_path, e))
# Save .py script as well
if self.save_script:
py_path = os.path.splitext(os_path)[0] + '.py'
self.log.debug("Writing script %s", py_path)
try:
with io.open(py_path, 'w', encoding='utf-8') as f:
current.write(nb, f, u'py')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s %s' % (py_path, e))
model = self.get_notebook(new_name, new_path, content=False)
return model
def update_notebook(self, model, name, path=''):
"""Update the notebook's path and/or name"""
path = path.strip('/')
new_name = model.get('name', name)
new_path = model.get('path', path).strip('/')
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
model = self.get_notebook(new_name, new_path, content=False)
return model
def delete_notebook(self, name, path=''):
"""Delete notebook by name and path."""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isfile(os_path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % os_path)
# clear checkpoints
for checkpoint in self.list_checkpoints(name, path):
checkpoint_id = checkpoint['id']
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if os.path.isfile(cp_path):
self.log.debug("Unlinking checkpoint %s", cp_path)
os.unlink(cp_path)
self.log.debug("Unlinking notebook %s", os_path)
os.unlink(os_path)
def rename_notebook(self, old_name, old_path, new_name, new_path):
"""Rename a notebook."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_name == old_name and new_path == old_path:
return
new_os_path = self._get_os_path(new_name, new_path)
old_os_path = self._get_os_path(old_name, old_path)
# Should we proceed with the move?
if os.path.isfile(new_os_path):
raise web.HTTPError(409, u'Notebook with name already exists: %s' % new_os_path)
if self.save_script:
old_py_path = os.path.splitext(old_os_path)[0] + '.py'
new_py_path = os.path.splitext(new_os_path)[0] + '.py'
if os.path.isfile(new_py_path):
raise web.HTTPError(409, u'Python script with name already exists: %s' % new_py_path)
# Move the notebook file
try:
os.rename(old_os_path, new_os_path)
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming notebook: %s %s' % (old_os_path, e))
# Move the checkpoints
old_checkpoints = self.list_checkpoints(old_name, old_path)
for cp in old_checkpoints:
checkpoint_id = cp['id']
old_cp_path = self.get_checkpoint_path(checkpoint_id, old_name, old_path)
new_cp_path = self.get_checkpoint_path(checkpoint_id, new_name, new_path)
if os.path.isfile(old_cp_path):
self.log.debug("Renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
os.rename(old_cp_path, new_cp_path)
# Move the .py script
if self.save_script:
os.rename(old_py_path, new_py_path)
# Checkpoint-related utilities
def get_checkpoint_path(self, checkpoint_id, name, path=''):
"""find the path to a checkpoint"""
path = path.strip('/')
basename, _ = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=self.filename_ext,
)
cp_path = os.path.join(path, self.checkpoint_dir, filename)
return cp_path
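# Example (hypothetical values, assuming filename_ext == '.ipynb'): for
# name 'foo.ipynb' and the single checkpoint id 'checkpoint', the checkpoint
# file is named 'foo-checkpoint.ipynb' inside checkpoint_dir; note that an
# absolute checkpoint_dir makes os.path.join discard the leading path.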
def get_checkpoint_model(self, checkpoint_id, name, path=''):
"""construct the info dict for a given checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
stats = os.stat(cp_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id = checkpoint_id,
last_modified = last_modified,
)
return info
# public checkpoint API
def create_checkpoint(self, name, path=''):
"""Create a checkpoint from the current state of a notebook"""
path = path.strip('/')
nb_path = self._get_os_path(name, path)
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
self.log.debug("creating checkpoint for notebook %s", name)
if not os.path.exists(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
self._copy(nb_path, cp_path)
# return the checkpoint info
return self.get_checkpoint_model(checkpoint_id, name, path)
def list_checkpoints(self, name, path=''):
"""list the checkpoints for a given notebook
This notebook manager currently only supports one checkpoint per notebook.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.exists(os_path):
return []
else:
return [self.get_checkpoint_model(checkpoint_id, name, path)]
def restore_checkpoint(self, checkpoint_id, name, path=''):
"""restore a notebook to a checkpointed state"""
path = path.strip('/')
self.log.info("restoring Notebook %s from checkpoint %s", name, checkpoint_id)
nb_path = self._get_os_path(name, path)
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
self.log.debug("checkpoint file does not exist: %s", cp_path)
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)
)
# ensure notebook is readable (never restore from an unreadable notebook)
with io.open(cp_path, 'r', encoding='utf-8') as f:
current.read(f, u'json')
self._copy(cp_path, nb_path)
self.log.debug("copying %s -> %s", cp_path, nb_path)
def delete_checkpoint(self, checkpoint_id, name, path=''):
"""delete a notebook's checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s%s-%s' % (path, name, checkpoint_id)
)
self.log.debug("unlinking %s", cp_path)
os.unlink(cp_path)
def info_string(self):
return "Serving notebooks from local directory: %s" % self.notebook_dir
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/IPython/html/services/notebooks/filenbmanager.py
|
Python
|
gpl-3.0
| 18,871
|
[
"Brian"
] |
626e570c0dff914437e1c940ad811cf18d9702fc0f11dc32b3510ae82188942c
|
import os
import os.path
try:
from ase.units import AUT # requires rev1839 or later
except ImportError:
from ase.units import second, alpha, _hbar, _me, _c
AUT = second * _hbar / (alpha**2 * _me * _c**2)
del second, alpha, _hbar, _me, _c
from ase.units import Bohr, Hartree
from ase.data import atomic_names
from ase.atoms import Atoms
import numpy as np
import gpaw.mpi as mpi
import time, tempfile
def open(filename, mode='r', comm=mpi.world):
if filename.endswith('.nc'):
import gpaw.io.netcdf as io
elif filename.endswith('.db'):
import gpaw.io.cmr_io as io
elif filename.endswith('.hdf5'):
import gpaw.io.hdf5 as io
else:
if not filename.endswith('.gpw'):
filename += '.gpw'
import gpaw.io.tar as io
if mode == 'r':
return io.Reader(filename, comm)
elif mode == 'w':
return io.Writer(filename, comm)
else:
raise ValueError("Illegal mode! Use 'r' or 'w'.")
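# Usage sketch (file names are placeholders): open('restart.gpw', 'r')
# dispatches to the tar-based Reader and open('restart.hdf5', 'w') to the
# hdf5 Writer; names not ending in .nc, .db or .hdf5 get '.gpw' appended.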
def wave_function_name_template(mode):
try:
ftype, template = mode.split(':')
except ValueError:
ftype = mode
template = 'wfs/psit_Gs%dk%dn%d'
return ftype, template
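# Sketch of the parsing above (the path is a placeholder):
# wave_function_name_template('nc:wfs/psit_Gs%dk%dn%d') returns
# ('nc', 'wfs/psit_Gs%dk%dn%d'); a bare mode such as 'nc' falls back to
# the default template 'wfs/psit_Gs%dk%dn%d'.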
def write(paw, filename, mode, cmr_params=None, **kwargs):
"""Write state to file.
The `mode` argument should be one of:
``''``:
Don't write the wave functions.
``'all'``:
Write also the wave functions to the file.
``'nc'`` or ``'gpw'``:
Write wave functions as separate files (the default filenames
are ``'psit_Gs%dk%dn%d.nc' % (s, k, n)`` for ``'nc'``, where
``s``, ``k`` and ``n`` are spin, **k**-point and band indices). XXX
``'nc:mywfs/psit_Gs%dk%dn%d'``:
Defines the filenames to be ``'mywfs/psit_Gs%dk%dn%d' % (s, k, n)``.
The directory ``mywfs`` is created if not present. XXX
cmr_params specifies the parameters that should be used for CMR
(Computational Materials Repository).
Please note: the mode argument is ignored for CMR.
"""
wfs = paw.wfs
scf = paw.scf
density = paw.density
hamiltonian = paw.hamiltonian
world = paw.wfs.world
domain_comm = wfs.gd.comm
kpt_comm = wfs.kpt_comm
band_comm = wfs.band_comm
master = (world.rank == 0)
atoms = paw.atoms
natoms = len(atoms)
magmom_a = paw.get_magnetic_moments()
hdf5 = filename.endswith('.hdf5')
if master or hdf5:
w = open(filename, 'w', world)
w['history'] = 'GPAW restart file'
w['version'] = '0.8'
w['lengthunit'] = 'Bohr'
w['energyunit'] = 'Hartree'
try:
tag_a = atoms.get_tags()
if tag_a is None:
raise KeyError
except KeyError:
tag_a = np.zeros(natoms, int)
w.dimension('natoms', natoms)
w.dimension('3', 3)
w.add('AtomicNumbers', ('natoms',),
atoms.get_atomic_numbers(), units=(0, 0, 0))
w.add('CartesianPositions', ('natoms', '3'),
atoms.get_positions() / Bohr, units=(1, 0, 0))
w.add('MagneticMoments', ('natoms',), magmom_a, units=(0, 0, 0))
w.add('Tags', ('natoms',), tag_a, units=(0, 0, 0))
w.add('BoundaryConditions', ('3',), atoms.get_pbc(), units=(0, 0, 0))
w.add('UnitCell', ('3', '3'), atoms.get_cell() / Bohr, units=(1, 0, 0))
if atoms.get_velocities() is not None:
w.add('CartesianVelocities', ('natoms', '3'),
atoms.get_velocities() * AUT / Bohr, units=(1, 0, -1))
w.add('PotentialEnergy', (), hamiltonian.Etot + 0.5 * hamiltonian.S,
units=(0, 1, 0))
if paw.forces.F_av is not None:
w.add('CartesianForces', ('natoms', '3'), paw.forces.F_av,
units=(-1, 1, 0))
# Write the k-points:
if wfs.kd.N_c is not None:
w.add('NBZKPoints', ('3'), wfs.kd.N_c)
w.dimension('nbzkpts', len(wfs.bzk_kc))
w.dimension('nibzkpts', len(wfs.ibzk_kc))
w.add('BZKPoints', ('nbzkpts', '3'), wfs.bzk_kc)
w.add('IBZKPoints', ('nibzkpts', '3'), wfs.ibzk_kc)
w.add('IBZKPointWeights', ('nibzkpts',), wfs.weight_k)
# Create dimensions for various netCDF variables:
ng = wfs.gd.get_size_of_global_array()
w.dimension('ngptsx', ng[0])
w.dimension('ngptsy', ng[1])
w.dimension('ngptsz', ng[2])
ng = density.finegd.get_size_of_global_array()
w.dimension('nfinegptsx', ng[0])
w.dimension('nfinegptsy', ng[1])
w.dimension('nfinegptsz', ng[2])
w.dimension('nspins', wfs.nspins)
w.dimension('nbands', wfs.nbands)
nproj = sum([setup.ni for setup in wfs.setups])
nadm = sum([setup.ni * (setup.ni + 1) // 2 for setup in wfs.setups])
w.dimension('nproj', nproj)
w.dimension('nadm', nadm)
p = paw.input_parameters
# Write various parameters:
(w['KohnShamStencil'],
w['InterpolationStencil']) = p['stencils']
w['PoissonStencil'] = paw.hamiltonian.poisson.get_stencil()
w['XCFunctional'] = paw.hamiltonian.xc.name
w['Charge'] = p['charge']
w['FixMagneticMoment'] = paw.occupations.fixmagmom
w['UseSymmetry'] = p['usesymm']
w['Converged'] = scf.converged
w['FermiWidth'] = paw.occupations.width
w['MixClass'] = density.mixer.__class__.__name__
w['MixBeta'] = density.mixer.beta
w['MixOld'] = density.mixer.nmaxold
w['MixWeight'] = density.mixer.weight
w['MaximumAngularMomentum'] = p.lmax
w['SoftGauss'] = False
w['FixDensity'] = p.fixdensity
w['DensityConvergenceCriterion'] = p['convergence']['density']
w['EnergyConvergenceCriterion'] = p['convergence']['energy'] / Hartree
w['EigenstatesConvergenceCriterion'] = p['convergence']['eigenstates']
w['NumberOfBandsToConverge'] = p['convergence']['bands']
w['Ekin'] = hamiltonian.Ekin
w['Epot'] = hamiltonian.Epot
w['Ebar'] = hamiltonian.Ebar
w['Eext'] = hamiltonian.Eext
w['Exc'] = hamiltonian.Exc
w['S'] = hamiltonian.S
try:
if paw.occupations.fixmagmom:
w['FermiLevel'] = paw.occupations.get_fermi_levels_mean()
w['FermiSplit'] = paw.occupations.get_fermi_splitting()
else:
w['FermiLevel'] = paw.occupations.get_fermi_level()
except ValueError:
# Zero temperature calculation - don't write Fermi level:
pass
# write errors
w['DensityError'] = scf.density_error
w['EnergyError'] = scf.energy_error
w['EigenstateError'] = scf.eigenstates_error
if wfs.dtype == float:
w['DataType'] = 'Float'
else:
w['DataType'] = 'Complex'
# Try to write time and kick strength in time-propagation TDDFT:
for attr, name in [('time', 'Time'), ('niter', 'TimeSteps'), \
('kick_strength', 'AbsorptionKick')]:
if hasattr(paw, attr):
value = getattr(paw, attr)
if isinstance(value, np.ndarray):
w.add(name, ('3',), value)
else:
w[name] = value
w['Mode'] = p.mode
# Write fingerprint (md5-digest) for all setups:
for setup in wfs.setups.setups.values():
key = atomic_names[setup.Z] + 'Fingerprint'
if setup.type != 'paw':
key += '(%s)' % setup.type
w[key] = setup.fingerprint
setup_types = p['setups']
if isinstance(setup_types, str):
setup_types = {None: setup_types}
for key, value in setup_types.items():
if not isinstance(value, str):
# Setups which are not strings are assumed to be
# runtime-dependent and should *not* be saved. We'll
# just discard the whole dictionary
setup_types = None
break
w['SetupTypes'] = repr(setup_types)
basis = p['basis'] # And similarly for basis sets
if isinstance(basis, dict):
for key, value in basis.items():
if not isinstance(value, str):
basis = None
w['BasisSet'] = repr(basis)
dtype = {float: float, complex: complex}[wfs.dtype]
else:
w = None
# Write projections:
if master or hdf5:
w.add('Projections', ('nspins', 'nibzkpts', 'nbands', 'nproj'),
dtype=dtype)
for s in range(wfs.nspins):
for k in range(wfs.nibzkpts):
all_P_ni = wfs.collect_projections(k, s)
if master:
w.fill(all_P_ni, s, k)
# Write atomic density matrices and non-local part of hamiltonian:
if master:
all_D_sp = np.empty((wfs.nspins, nadm))
all_H_sp = np.empty((wfs.nspins, nadm))
p1 = 0
for a in range(natoms):
ni = wfs.setups[a].ni
nii = ni * (ni + 1) // 2
if a in density.D_asp:
D_sp = density.D_asp[a]
dH_sp = hamiltonian.dH_asp[a]
else:
D_sp = np.empty((wfs.nspins, nii))
domain_comm.receive(D_sp, wfs.rank_a[a], 207)
dH_sp = np.empty((wfs.nspins, nii))
domain_comm.receive(dH_sp, wfs.rank_a[a], 2071)
p2 = p1 + nii
all_D_sp[:, p1:p2] = D_sp
all_H_sp[:, p1:p2] = dH_sp
p1 = p2
assert p2 == nadm
elif kpt_comm.rank == 0 and band_comm.rank == 0:
for a in range(natoms):
if a in density.D_asp:
domain_comm.send(density.D_asp[a], 0, 207)
domain_comm.send(hamiltonian.dH_asp[a], 0, 2071)
if master or hdf5:
w.add('AtomicDensityMatrices', ('nspins', 'nadm'), dtype=float)
if master:
w.fill(all_D_sp)
if master or hdf5:
w.add('NonLocalPartOfHamiltonian', ('nspins', 'nadm'), dtype=float)
if master:
w.fill(all_H_sp)
# Write the eigenvalues and occupation numbers:
for name, var in [('Eigenvalues', 'eps_n'), ('OccupationNumbers', 'f_n')]:
if master or hdf5:
w.add(name, ('nspins', 'nibzkpts', 'nbands'), dtype=float)
for s in range(wfs.nspins):
for k in range(wfs.nibzkpts):
a_n = wfs.collect_array(var, k, s)
if master:
w.fill(a_n, s, k)
# Attempt to read the number of delta-scf orbitals:
if hasattr(paw.occupations, 'norbitals'):
norbitals = paw.occupations.norbitals
else:
norbitals = None
# Write the linear expansion coefficients for Delta SCF:
if mode == 'all' and norbitals is not None:
if master or hdf5:
w.dimension('norbitals', norbitals)
w.add('LinearExpansionOccupations', ('nspins',
'nibzkpts', 'norbitals'), dtype=float)
for s in range(wfs.nspins):
for k in range(wfs.nibzkpts):
ne_o = wfs.collect_auxiliary('ne_o', k, s, shape=norbitals)
if master:
w.fill(ne_o, s, k)
if master or hdf5:
w.add('LinearExpansionCoefficients', ('nspins',
'nibzkpts', 'norbitals', 'nbands'), dtype=complex)
for s in range(wfs.nspins):
for k in range(wfs.nibzkpts):
for o in range(norbitals):
c_n = wfs.collect_array('c_on', k, s, subset=o)
if master:
w.fill(c_n, s, k, o)
# Write the pseudodensity on the coarse grid:
if master or hdf5:
w.add('PseudoElectronDensity',
('nspins', 'ngptsx', 'ngptsy', 'ngptsz'), dtype=float)
for s in range(wfs.nspins):
if hdf5:
do_write = (kpt_comm.rank == 0)
indices = [s,] + wfs.gd.get_slice()
w.fill(density.nt_sG[s], parallel=True, write=do_write,
*indices)
elif kpt_comm.rank == 0:
nt_sG = wfs.gd.collect(density.nt_sG[s])
if master:
w.fill(nt_sG, s)
# Write the pseudopotential on the coarse grid:
if master or hdf5:
w.add('PseudoPotential',
('nspins', 'ngptsx', 'ngptsy', 'ngptsz'), dtype=float)
for s in range(wfs.nspins):
if hdf5:
do_write = (kpt_comm.rank == 0)
indices = [s,] + wfs.gd.get_slice()
w.fill(hamiltonian.vt_sG[s], parallel=True, write=do_write,
*indices)
elif kpt_comm.rank == 0:
vt_sG = wfs.gd.collect(hamiltonian.vt_sG[s])
if master:
w.fill(vt_sG, s)
hamiltonian.xc.write(w, natoms)
if mode == 'all':
wfs.write_wave_functions(w)
elif mode != '':
# Write the wave functions as separate files
# check if we need subdirs and have to create them
ftype, template = wave_function_name_template(mode)
dirname = os.path.dirname(template)
if dirname:
if master and not os.path.isdir(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
else:
raise RuntimeError("Can't create subdir " + dirname)
else:
dirname = '.'
# the slaves have to wait until the directory is created
world.barrier()
print >> paw.txt, 'Writing wave functions to', dirname,\
'using mode=', mode
ngd = wfs.gd.get_size_of_global_array()
for s in range(wfs.nspins):
for k in range(wfs.nibzkpts):
for n in range(wfs.nbands):
psit_G = wfs.get_wave_function_array(n, k, s)
if master:
fname = template % (s, k, n) + '.' + ftype
wpsi = open(fname, 'w')
wpsi.dimension('1', 1)
wpsi.dimension('ngptsx', ngd[0])
wpsi.dimension('ngptsy', ngd[1])
wpsi.dimension('ngptsz', ngd[2])
wpsi.add('PseudoWaveFunction',
('1', 'ngptsx', 'ngptsy', 'ngptsz'),
dtype=dtype)
wpsi.fill(psit_G)
wpsi.close()
db = False
if filename.endswith('.db'):
if master:
w.write_additional_db_params(cmr_params=cmr_params)
elif cmr_params is not None and 'db' in cmr_params:
db = cmr_params['db']
if master or hdf5:
# Close the file here to ensure that the last wave function is
# written to disk:
w.close()
# We don't want the slaves to start reading before the master has
# finished writing:
world.barrier()
# Creates a db file for CMR, if requested
if db and not filename.endswith('.db'):
#Write a db copy to the database
write(paw, '.db', mode='', cmr_params=cmr_params, **kwargs)
def read(paw, reader):
r = reader
wfs = paw.wfs
density = paw.density
density.allocate()
hamiltonian = paw.hamiltonian
hamiltonian.allocate()
natoms = len(paw.atoms)
world = paw.wfs.world
domain_comm = wfs.gd.comm
kpt_comm = wfs.kpt_comm
band_comm = wfs.band_comm
version = r['version']
hdf5 = hasattr(r, 'hdf5_reader')
# Verify setup fingerprints and count projectors and atomic matrices:
for setup in wfs.setups.setups.values():
try:
key = atomic_names[setup.Z] + 'Fingerprint'
if setup.type != 'paw':
key += '(%s)' % setup.type
if setup.fingerprint != r[key]:
msg = 'Setup for %s (%s) not compatible with restart file.' \
% (setup.symbol, setup.filename)
if paw.input_parameters['idiotproof']:
raise RuntimeError(msg)
else:
paw.warn(msg)
except (AttributeError, KeyError):
msg = 'Fingerprint of setup for %s (%s) not in restart file.' \
% (setup.symbol, setup.filename)
if paw.input_parameters['idiotproof']:
raise RuntimeError(msg)
else:
paw.warn(msg)
nproj = sum([setup.ni for setup in wfs.setups])
nadm = sum([setup.ni * (setup.ni + 1) // 2 for setup in wfs.setups])
# Verify dimensions for minimally required netCDF variables:
ng = wfs.gd.get_size_of_global_array()
nfg = density.finegd.get_size_of_global_array()
shapes = {'ngptsx': ng[0],
'ngptsy': ng[1],
'ngptsz': ng[2],
'nspins': wfs.nspins,
'nproj' : nproj,
'nadm' : nadm}
for name,dim in shapes.items():
if r.dimension(name) != dim:
raise ValueError('shape mismatch: expected %s=%d' % (name,dim))
# Read pseudoelectron density on the coarse grid
# and distribute out to nodes:
nt_sG = wfs.gd.empty(density.nspins)
if hdf5:
indices = [slice(0, density.nspins),] + wfs.gd.get_slice()
nt_sG[:] = r.get('PseudoElectronDensity', *indices)
else:
for s in range(density.nspins):
wfs.gd.distribute(r.get('PseudoElectronDensity', s), nt_sG[s])
# Read atomic density matrices
D_asp = {}
density.rank_a = np.zeros(natoms, int)
if domain_comm.rank == 0:
D_asp = read_atomic_matrices(r, 'AtomicDensityMatrices', wfs.setups)
density.initialize_directly_from_arrays(nt_sG, D_asp)
# Read pseudo potential on the coarse grid
# and distribute out to nodes:
if version > 0.3:
hamiltonian.vt_sG = wfs.gd.empty(hamiltonian.nspins)
if hdf5:
indices = [slice(0, hamiltonian.nspins), ] + wfs.gd.get_slice()
hamiltonian.vt_sG[:] = r.get('PseudoPotential', *indices)
else:
for s in range(hamiltonian.nspins):
wfs.gd.distribute(r.get('PseudoPotential', s),
hamiltonian.vt_sG[s])
# Read non-local part of hamiltonian
hamiltonian.dH_asp = {}
hamiltonian.rank_a = np.zeros(natoms, int)
if domain_comm.rank == 0 and version > 0.3:
hamiltonian.dH_asp = read_atomic_matrices(r, \
'NonLocalPartOfHamiltonian', wfs.setups)
hamiltonian.Ekin = r['Ekin']
hamiltonian.Epot = r['Epot']
hamiltonian.Ebar = r['Ebar']
try:
hamiltonian.Eext = r['Eext']
except (AttributeError, KeyError):
hamiltonian.Eext = 0.0
hamiltonian.Exc = r['Exc']
hamiltonian.S = r['S']
hamiltonian.Etot = r.get('PotentialEnergy') - 0.5 * hamiltonian.S
wfs.rank_a = np.zeros(natoms, int)
if version > 0.3:
density_error = r['DensityError']
if density_error is not None:
density.mixer.set_charge_sloshing(density_error)
Etot = hamiltonian.Etot
energy_error = r['EnergyError']
if energy_error is not None:
paw.scf.energies = [Etot, Etot + energy_error, Etot]
else:
paw.scf.converged = r['Converged']
if version > 0.6:
if paw.occupations.fixmagmom:
if 'FermiLevel' in r.get_parameters():
paw.occupations.set_fermi_levels_mean(r['FermiLevel'])
if 'FermiSplit' in r.get_parameters():
paw.occupations.set_fermi_splitting(r['FermiSplit'])
else:
if 'FermiLevel' in r.get_parameters():
paw.occupations.set_fermi_level(r['FermiLevel'])
else:
if not paw.input_parameters.fixmom and 'FermiLevel' in r.get_parameters():
paw.occupations.set_fermi_level(r['FermiLevel'])
#paw.occupations.magmom = paw.atoms.get_initial_magnetic_moments().sum()
# Try to read the current time and kick strength in time-propagation TDDFT:
for attr, name in [('time', 'Time'), ('niter', 'TimeSteps'), \
('kick_strength', 'AbsorptionKick')]:
if hasattr(paw, attr):
try:
if r.has_array(name):
value = r.get(name)
else:
value = r[name]
setattr(paw, attr, value)
except KeyError:
pass
# Try to read the number of Delta SCF orbitals
try:
norbitals = r.dimension('norbitals')
paw.occupations.norbitals = norbitals
except (AttributeError, KeyError):
norbitals = None
# Wave functions and eigenvalues:
dtype = r['DataType']
if dtype == 'Float' and paw.input_parameters['dtype']!=complex:
wfs.dtype = float
else:
wfs.dtype = complex
nibzkpts = r.dimension('nibzkpts')
nbands = r.dimension('nbands')
nslice = wfs.bd.get_slice()
if (nibzkpts == len(wfs.ibzk_kc) and
nbands == band_comm.size * wfs.mynbands):
# Verify that symmetries for k-point reduction haven't changed:
assert np.abs(r.get('IBZKPoints')-wfs.kd.ibzk_kc).max() < 1e-12
assert np.abs(r.get('IBZKPointWeights')-wfs.kd.weight_k).max() < 1e-12
for kpt in wfs.kpt_u:
# Eigenvalues and occupation numbers:
k = kpt.k
s = kpt.s
eps_n = r.get('Eigenvalues', s, k)
f_n = r.get('OccupationNumbers', s, k)
kpt.eps_n = eps_n[nslice].copy()
kpt.f_n = f_n[nslice].copy()
if norbitals is not None:
kpt.ne_o = np.empty(norbitals, dtype=float)
kpt.c_on = np.empty((norbitals, wfs.mynbands), dtype=complex)
for o in range(norbitals):
kpt.ne_o[o] = r.get('LinearExpansionOccupations', s, k, o)
c_n = r.get('LinearExpansionCoefficients', s, k, o)
kpt.c_on[o,:] = c_n[nslice]
if version > 0.3:
wfs.eigensolver.error = r['EigenstateError']
if (r.has_array('PseudoWaveFunctions') and
paw.input_parameters.mode == 'fd'):
if band_comm.size == 1 and not hdf5:
# We may not be able to keep all the wave
# functions in memory - so psit_nG will be a special type of
# array that is really just a reference to a file:
for kpt in wfs.kpt_u:
kpt.psit_nG = r.get_reference('PseudoWaveFunctions',
kpt.s, kpt.k)
else:
for kpt in wfs.kpt_u:
kpt.psit_nG = wfs.gd.empty(wfs.mynbands, wfs.dtype)
if hdf5:
indices = [kpt.s, kpt.k]
indices.append(wfs.bd.get_slice())
indices += wfs.gd.get_slice()
kpt.psit_nG[:] = r.get('PseudoWaveFunctions', *indices)
else:
# Read band by band to save memory
for myn, psit_G in enumerate(kpt.psit_nG):
n = wfs.bd.global_index(myn)
if domain_comm.rank == 0:
big_psit_G = np.array(
r.get('PseudoWaveFunctions',
kpt.s, kpt.k, n),
wfs.dtype)
else:
big_psit_G = None
wfs.gd.distribute(big_psit_G, psit_G)
if (r.has_array('WaveFunctionCoefficients') and
paw.input_parameters.mode == 'lcao'):
wfs.read_coefficients(r)
for u, kpt in enumerate(wfs.kpt_u):
P_ni = r.get('Projections', kpt.s, kpt.k)
i1 = 0
kpt.P_ani = {}
for a, setup in enumerate(wfs.setups):
i2 = i1 + setup.ni
if domain_comm.rank == 0:
kpt.P_ani[a] = np.array(P_ni[nslice, i1:i2], wfs.dtype)
i1 = i2
# Manage mode change:
paw.scf.check_convergence(density, wfs.eigensolver)
newmode = paw.input_parameters.mode
try:
oldmode = r['Mode']
except (AttributeError, KeyError):
oldmode = 'fd' # This is an old gpw file from before lcao existed
if newmode == 'lcao':
spos_ac = paw.atoms.get_scaled_positions() % 1.0
wfs.load_lazily(hamiltonian, spos_ac)
if newmode != oldmode:
paw.scf.reset()
# Get the forces from the old calculation:
if r.has_array('CartesianForces'):
paw.forces.F_av = r.get('CartesianForces')
else:
paw.forces.reset()
hamiltonian.xc.read(r)
def read_atoms(reader):
if isinstance(reader, str):
reader = open(reader, 'r')
positions = reader.get('CartesianPositions') * Bohr
numbers = reader.get('AtomicNumbers')
cell = reader.get('UnitCell') * Bohr
pbc = reader.get('BoundaryConditions')
tags = reader.get('Tags')
magmoms = reader.get('MagneticMoments')
atoms = Atoms(positions=positions,
numbers=numbers,
cell=cell,
pbc=pbc)
if tags.any():
atoms.set_tags(tags)
if magmoms.any():
atoms.set_initial_magnetic_moments(magmoms)
if reader.has_array('CartesianVelocities'):
velocities = reader.get('CartesianVelocities') * Bohr / AUT
atoms.set_velocities(velocities)
return atoms
def read_atomic_matrices(reader, key, setups):
all_M_sp = reader.get(key)
M_asp = {}
p1 = 0
for a, setup in enumerate(setups):
ni = setup.ni
p2 = p1 + ni * (ni + 1) // 2
M_asp[a] = all_M_sp[:, p1:p2].copy()
p1 = p2
return M_asp
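# Hedged illustration (added; not in the original module): for two setups
# with ni = 2 and ni = 3, the packed axis of e.g. 'AtomicDensityMatrices'
# holds 2*3//2 = 3 and 3*4//2 = 6 packed-triangle elements, so the slices
# above become all_M_sp[:, 0:3] and all_M_sp[:, 3:9] for atoms 0 and 1.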
def read_wave_function(gd, s, k, n, mode):
"""Read the wave function for spin s, kpoint k and index n
from a sperate file. The filename is determined from the mode
in the same way as in write() (see above)"""
ftype, template = wave_function_name_template(mode)
fname = template % (s,k,n) + '.'+ftype
i = gd.get_slice()
r = open(fname, 'r')
psit_G = r.get('PseudoWaveFunction', 0)[i]
r.close()
return psit_G
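# Hedged usage sketch (grid descriptor `gd` and `mode` string are assumed to
# match what write() produced; the exact filename template comes from
# wave_function_name_template elsewhere in this module):
#     psit_G = read_wave_function(gd, s=0, k=0, n=5, mode=mode)
# returns the pseudo wave function restricted to this domain's grid slice.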
|
qsnake/gpaw
|
gpaw/io/__init__.py
|
Python
|
gpl-3.0
| 26,399
|
[
"ASE",
"GPAW",
"NetCDF"
] |
74a822d364273f32b1ce0ee81594b0a7716ae1ee2cfb6a8d22c891c1d3ed7470
|
# -*- coding: utf-8 -*-
"""
Tests the "preview" selector in the LMS that allows changing between Staff, Student, and Content Groups.
"""
from nose.plugins.attrib import attr
from ..ga_role_helpers import GaccoTestRoleMixin
from ..helpers import UniqueCourseTest, create_user_partition_json
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from xmodule.partitions.partitions import Group
from textwrap import dedent
@attr('shard_3')
class StaffViewTest(UniqueCourseTest):
"""
Tests that verify the staff view.
"""
USERNAME = "STAFF_TESTER"
EMAIL = "johndoe@example.com"
def _auto_auth(self):
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=True).visit()
def setUp(self):
super(StaffViewTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with sections/problems, tabs, updates, and handouts
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.populate_course_fixture(self.course_fixture) # pylint: disable=no-member
self.course_fixture.install()
# Auto-auth register for the course.
# Do this as global staff so that you will see the Staff View
self._auto_auth()
def _goto_staff_page(self):
"""
Open staff page with assertion
"""
self.courseware_page.visit()
staff_page = StaffPage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, 'Staff')
return staff_page
@attr('shard_3')
class CourseWithoutContentGroupsTest(StaffViewTest):
"""
Setup for tests that have no content restricted to specific content groups.
"""
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 2 problems.
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=problem_data),
XBlockFixtureDesc('problem', 'Test Problem 2', data=problem_data)
)
)
)
@attr('shard_3')
class StaffViewToggleTest(CourseWithoutContentGroupsTest, GaccoTestRoleMixin):
"""
Tests for the staff view toggle button.
"""
def test_instructor_tab_visibility_with_ga_global_course_creator(self):
"""
Test that the instructor tab is always hidden by GaGlobalCourseCreator.
"""
self.logout()
self.auto_auth_with_ga_global_course_creator(self.course_id)
self.courseware_page.visit()
self.assertFalse(self.courseware_page.has_tab('Instructor'))
def test_instructor_tab_visibility_with_ga_course_scorer(self):
"""
Test that the instructor tab is visible to ga_course_scorer as staff and hidden when viewing as a student.
"""
self.logout()
self.auto_auth_with_ga_course_scorer(self.course_id)
course_page = self._goto_staff_page()
self.assertTrue(course_page.has_tab('Instructor'))
course_page.set_staff_view_mode('Student')
self.assertEqual(course_page.staff_view_mode, 'Student')
self.assertFalse(course_page.has_tab('Instructor'))
@attr('shard_3')
class StaffDebugTestWithGaCourseScorer(CourseWithoutContentGroupsTest, GaccoTestRoleMixin):
"""
Tests that verify the staff debug info.
"""
def _auto_auth(self):
self.user_info = self.auto_auth_with_ga_course_scorer(self.course_id)
def test_enabled_staff_debug(self):
"""
Test that ga_course_scorer can view staff debug info
"""
staff_page = self._goto_staff_page()
# 'Staff Debug Info' is capitalized.
# 'text-transform: uppercase' is set for .instructor-info-action
# in lms/static/sass/course/courseware/_courseware.scss
self.assertTrue(u'STAFF DEBUG INFO' in staff_page.q(css='a.instructor-info-action').text)
def test_reset_attempts_empty(self):
"""
Test that we reset even when there is no student state
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.user_info['username']), msg)
def test_reset_attempts_state(self):
"""
Successfully reset the student attempts
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.user_info['username']), msg)
def test_student_by_email(self):
"""
Successfully reset the student attempts using their email address
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts(self.user_info['email'])
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.user_info['email']), msg)
def test_reset_attempts_for_problem_loaded_via_ajax(self):
"""
Successfully reset the student attempts for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.user_info['username']), msg)
@attr('shard_3')
class StaffDebugTestWithGaGlobalCourseCreator(CourseWithoutContentGroupsTest, GaccoTestRoleMixin):
"""
Tests that verify the staff debug info.
"""
def _auto_auth(self):
self.user_info = self.auto_auth_with_ga_global_course_creator(self.course_id)
def test_disabled_staff_debug(self):
"""
Test that ga_global_course_creator cannot view staff debug info
"""
courseware_page = self.courseware_page.visit()
self.assertFalse(courseware_page.q(css='a.instructor-info-action').is_present())
@attr('shard_3')
class StudentHistoryViewTestWithGaCourseScorer(CourseWithoutContentGroupsTest, GaccoTestRoleMixin):
"""
Tests that verify the Student History View.
"""
def _auto_auth(self):
self.user_info = self.auto_auth_with_ga_course_scorer(self.course_id)
def test_enabled_student_history_view(self):
"""
Test that ga_course_scorer can view Student history
"""
staff_page = self._goto_staff_page()
# 'Submission history' is capitalized.
# 'text-transform: uppercase' is set for .instructor-info-action
# in lms/static/sass/course/courseware/_courseware.scss
self.assertTrue(u'SUBMISSION HISTORY' in staff_page.q(css='a.instructor-info-action').text)
@attr('shard_3')
class StudentHistoryViewTestWithGaGlobalCourseCreator(CourseWithoutContentGroupsTest, GaccoTestRoleMixin):
"""
Tests that verify the Student History View.
"""
def _auto_auth(self):
self.user_info = self.auto_auth_with_ga_global_course_creator(self.course_id)
def test_disabled_student_history_view(self):
"""
Test that ga_global_course_creator cannot view Student history
"""
courseware_page = self.courseware_page.visit()
self.assertFalse(courseware_page.q(css='a.instructor-info-action').is_present())
@attr('shard_3')
class CourseWithContentGroupsTest(StaffViewTest, GaccoTestRoleMixin):
"""
Verifies that changing the "View this course as" selector works properly for content groups.
"""
def _auto_auth(self):
self.auto_auth_with_ga_global_course_creator(self.course_id)
def setUp(self):
super(CourseWithContentGroupsTest, self).setUp()
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,beta',
'Content Group Partition',
[Group("0", 'alpha'), Group("1", 'beta')],
scheme="cohort"
)
],
},
})
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 3 problems.
One problem is visible to all, one problem is visible only to Group "alpha", and
one problem is visible only to Group "beta".
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
self.alpha_text = "VISIBLE TO ALPHA"
self.beta_text = "VISIBLE TO BETA"
self.everyone_text = "VISIBLE TO EVERYONE"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'problem', self.alpha_text, data=problem_data, metadata={"group_access": {0: [0]}}
),
XBlockFixtureDesc(
'problem', self.beta_text, data=problem_data, metadata={"group_access": {0: [1]}}
),
XBlockFixtureDesc('problem', self.everyone_text, data=problem_data)
)
)
)
)
def test_staff_sees_all_problems_with_ga_global_course_creator(self):
"""
Scenario: GaGlobalCourseCreator sees all problems
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
Then I see all the problems, regardless of their group_access property
"""
self.logout()
self.auto_auth_with_ga_global_course_creator(self.course_id)
self.courseware_page.visit()
verify_expected_problem_visibility(self, self.courseware_page, [self.alpha_text, self.beta_text, self.everyone_text])
def test_staff_sees_all_problems_with_ga_course_scorer(self):
"""
Scenario: GaCourseScorer sees all problems
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
Then I see all the problems, regardless of their group_access property
"""
self.logout()
self.auto_auth_with_ga_course_scorer(self.course_id)
self.courseware_page.visit()
verify_expected_problem_visibility(self, self.courseware_page, [self.alpha_text, self.beta_text, self.everyone_text])
def verify_expected_problem_visibility(test, courseware_page, expected_problems):
"""
Helper method that checks that the expected problems are visible on the current page.
"""
test.assertEqual(
len(expected_problems), courseware_page.num_xblock_components, "Incorrect number of visible problems"
)
for index, expected_problem in enumerate(expected_problems):
test.assertIn(expected_problem, courseware_page.xblock_components[index].text)
|
nttks/edx-platform
|
common/test/acceptance/tests/lms/test_ga_user_preview.py
|
Python
|
agpl-3.0
| 13,047
|
[
"VisIt"
] |
f4c0f4b260f32a1fdf2f73a6769a052741e89fd10f67fa5bf3167090ac16be63
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
import Avogadro
# always use 'Extension' for class name
class Extension(QObject):
def __init__(self):
QObject.__init__(self)
def name(self):
return "My Extension"
def description(self):
return "Extension for ..."
def actions(self):
actions = []
action = QAction(self)
action.setText("Some action")
actions.append(action)
return actions
def menuPath(self, action):
return "Extensions"
def performAction(self, action, glwidget):
if action.text() == "Some action":
# do something...
return None
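# Hedged extension sketch (uses only names appearing above): a second action
# could be registered in actions(), e.g.
#     action2 = QAction(self); action2.setText("Another action")
#     actions.append(action2)
# and dispatched in performAction() with another action.text() comparison.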
|
rcplane/periodicdisplay
|
reference/avogadro/libavogadro/examples/python/extensiontemplate.py
|
Python
|
gpl-2.0
| 669
|
[
"Avogadro"
] |
3137b86654c389e44712be8f1b459531eceb5636271e5d09f039fadc9e5c7133
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.conf.urls.i18n import i18n_patterns
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from tuticfruti_blog.posts import views
urlpatterns = [
url(r'^$', views.PostListView.as_view(), name="home"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# Your stuff: custom urls includes go here
#CKEditor
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += i18n_patterns(
# User management
url(_(r'^users/'), include("tuticfruti_blog.users.urls", namespace="users")),
url(_(r'^accounts/'), include('allauth.urls')),
# Your stuff: custom urls includes go here
url(_(r'^posts/'), include('tuticfruti_blog.posts.urls', namespace='posts')), )
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
tuticfruti/tuticfruti_blog
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,459
|
[
"VisIt"
] |
82aadabac29c1031a78ad751c0d6c62ce054a245929904af1a65af78a2662e35
|
try: paraview.simple
except (NameError, AttributeError): from paraview.simple import *
Glyph3 = GetActiveSource()
AddAdditionalAttribute1 = AddAdditionalAttribute()
AddAdditionalAttribute1.AttributeName = 'Density'
AddAdditionalAttribute1.AdditionalAttributeFile = '/Users/corbett/Documents/Projects/Work/Viz/pvaddons/testdata/b1.00300.d0-1000.den'
DataRepresentation4 = GetDisplayProperties(Glyph3)
DataRepresentation5 = Show()
DataRepresentation5.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation5.SelectionCellLabelColor = [0.0, 1.0, 0.0]
DataRepresentation5.SelectionPointLabelJustification = 'Center'
DataRepresentation5.SelectionCellLabelJustification = 'Center'
DataRepresentation5.PointSize = 1.0
DataRepresentation5.ColorAttributeType = 'POINT_DATA'
DataRepresentation5.ColorArrayName = 'global id'
DataRepresentation5.SelectionLineWidth = 2.0
DataRepresentation5.Texture = []
DataRepresentation5.SelectionCellLabelFontSize = 24
DataRepresentation5.SelectionColor = [0.048416876478217748, 0.63672846570534825, 1.0]
DataRepresentation5.SelectionRepresentation = 'Wireframe'
DataRepresentation5.LookupTable = []
DataRepresentation4.Visibility = 0
Render()
|
corbett/parastro
|
ExamplePython/AddAdditionalAttribute.py
|
Python
|
lgpl-3.0
| 1,153
|
[
"ParaView"
] |
e61b6b54b578df7f60d293126ba305a66f0372c6890e2fde758361edddd3c01a
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class OfCatalyst(CMakePackage):
"""Of-catalyst is a library for OpenFOAM that provides a runtime-selectable
function object for embedding ParaView Catalyst in-situ visualization
into arbitrary OpenFOAM simulations.
Supports in-situ conversion of the following types:
1) finite volume meshes and fields, single or multi-region;
2) finite area meshes and fields, single region;
3) lagrangian (clouds), single or multiple clouds.
This offering is part of the community repository supported by OpenCFD Ltd,
producer and distributor of the OpenFOAM software via www.openfoam.com,
and owner of the OPENFOAM trademark.
OpenCFD Ltd has been developing and releasing OpenFOAM since its debut
in 2004.
"""
# Currently only via git
homepage = "https://develop.openfoam.com/Community/catalyst"
git = "https://develop.openfoam.com/Community/catalyst.git"
version('develop', branch='develop')
version('1806', tag='v1806')
variant('full', default=False, description='Build against paraview (full) or catalyst (light)')
depends_on('openfoam@1806', when='@1806', type=('build', 'link', 'run'))
depends_on('openfoam@develop', when='@develop', type=('build', 'link', 'run'))
depends_on('catalyst@5.5:', when='~full')
depends_on('paraview@5.5:+osmesa~qt', when='+full')
root_cmakelists_dir = 'src/catalyst'
def cmake_args(self):
"""Populate cmake arguments for ParaView."""
cmake_args = [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=%s' % join_path(
self.stage.source_path,
'spack-build')
]
return cmake_args
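# Hedged usage sketch (standard Spack spec syntax; concretization details
# depend on the local installation):
#     spack install of-catalyst@1806          # light build against catalyst
#     spack install of-catalyst@1806 +full    # full build against paraview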
|
LLNL/spack
|
var/spack/repos/builtin/packages/of-catalyst/package.py
|
Python
|
lgpl-2.1
| 1,893
|
[
"ParaView"
] |
85e85dae110e15b4c50f96e2f68748d1e286884d4b2e8f0239c611815da83156
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import numpy.testing as npt
import pandas as pd
from unittest import TestCase, main
from skbio import OrdinationResults
from skbio.stats.ordination import rda
from skbio.util import get_data_path, assert_ordination_results_equal
class TestRDAErrors(TestCase):
def setUp(self):
pass
def test_shape(self):
for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
Y = pd.DataFrame(np.random.randn(n, p))
X = pd.DataFrame(np.random.randn(n_, m))
yield npt.assert_raises, ValueError, rda, Y, X, None, None
class TestRDAResults(TestCase):
# STATUS: L&L only shows results with scaling 1, and they agree
# with vegan's (modulo multiplication by a constant). I can also
# compute scaling 2, agreeing with vegan, but there are no written
# results in L&L.
def setUp(self):
"""Data from table 11.3 in Legendre & Legendre 1998."""
self.sample_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
'Site5', 'Site6', 'Site7', 'Site8', 'Site9']
self.feature_ids = ['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5']
self.env_ids = list(map(str, range(4)))
self.pc_ids = ['RDA1', 'RDA2', 'RDA3', 'RDA4', 'RDA5', 'RDA6', 'RDA7']
self.Y = pd.DataFrame(
np.loadtxt(get_data_path('example2_Y')),
index=self.sample_ids, columns=self.feature_ids)
self.X = pd.DataFrame(
np.loadtxt(get_data_path('example2_X')),
index=self.sample_ids, columns=self.env_ids)
def test_scaling1(self):
scores = rda(self.Y, self.X, scaling=1)
# Load data as computed with vegan 2.0-8
vegan_features = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_species_scaling1_from_vegan')),
index=self.feature_ids,
columns=self.pc_ids)
vegan_samples = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_site_scaling1_from_vegan')),
index=self.sample_ids,
columns=self.pc_ids)
sample_constraints = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_sample_constraints_scaling1')),
index=self.sample_ids,
columns=self.pc_ids)
mat = np.loadtxt(get_data_path(
'example2_biplot_scaling1'))
cropped_pc_ids = self.pc_ids[:mat.shape[1]]
biplot_scores = pd.DataFrame(mat,
index=self.env_ids,
columns=cropped_pc_ids)
proportion_explained = pd.Series([0.44275783, 0.25614586,
0.15280354, 0.10497021,
0.02873375, 0.00987052,
0.00471828],
index=self.pc_ids)
eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
1.680705, 0.577350, 0.275984],
index=self.pc_ids)
exp = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=vegan_samples,
features=vegan_features,
sample_constraints=sample_constraints,
biplot_scores=biplot_scores,
proportion_explained=proportion_explained,
eigvals=eigvals)
assert_ordination_results_equal(scores, exp,
ignore_directionality=True,
decimal=6)
def test_scaling2(self):
scores = rda(self.Y, self.X, scaling=2)
# Load data as computed with vegan 2.0-8
vegan_features = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_species_scaling2_from_vegan')),
index=self.feature_ids,
columns=self.pc_ids)
vegan_samples = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_site_scaling2_from_vegan')),
index=self.sample_ids,
columns=self.pc_ids)
sample_constraints = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_sample_constraints_scaling2')),
index=self.sample_ids,
columns=self.pc_ids)
mat = np.loadtxt(get_data_path(
'example2_biplot_scaling2'))
cropped_pc_ids = self.pc_ids[:mat.shape[1]]
biplot_scores = pd.DataFrame(mat,
index=self.env_ids,
columns=cropped_pc_ids)
proportion_explained = pd.Series([0.44275783, 0.25614586,
0.15280354, 0.10497021,
0.02873375, 0.00987052,
0.00471828],
index=self.pc_ids)
eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
1.680705, 0.577350, 0.275984],
index=self.pc_ids)
exp = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=vegan_samples,
features=vegan_features,
sample_constraints=sample_constraints,
biplot_scores=biplot_scores,
proportion_explained=proportion_explained,
eigvals=eigvals)
assert_ordination_results_equal(scores, exp,
ignore_directionality=True,
decimal=6)
class TestRDAResults_biplot_score(TestCase):
# STATUS: L&L only shows results with scaling 1, and they agree
# with vegan's (modulo multiplication by a constant). I can also
# compute scaling 2, agreeing with vegan, but there are no written
# results in L&L.
def setUp(self):
"""varespec and varechem from Väre etal. 1995 DOI: 10.2307/3236351"""
self.Y = pd.read_csv(get_data_path('varespec.csv'), index_col=0)
self.X = pd.read_csv(get_data_path('varechem.csv'), index_col=0)
self.Y.index.name = None
self.X.index.name = None
def test_biplot_score(self):
rda_ = rda(y=self.Y, x=self.X, scale_Y=False, scaling=1)
# Load data as computed with vegan 2.4-3:
# library(vegan)
# data(varechem)
# data(varespec)
# rda_ = rda(X=varespec, Y=varechem, scale=FALSE)
# write.table(summary(rda_, scaling=1)$biplot,
# 'vare_rda_biplot_from_vegan.csv', sep=',')
# write.table(summary(rda_, scaling=1)$sites,
# 'vare_rda_sites_from_vegan.csv', sep=',')
# write.table(summary(rda_, scaling=1)$species,
# 'vare_rda_species_from_vegan.csv', sep=',')
# write.table(summary(rda_, scaling=1)$constraints, #
# 'vare_rda_constraints_from_vegan.csv', sep=',')
# write.table(summary(rda_, scaling=1)$cont$importance[2, ],
# 'vare_rda_propexpl_from_vegan.csv', sep=',')
# write.table(summary(rda_, scaling=1)$cont$importance[1, ],
# 'vare_rda_eigvals_from_vegan.csv', sep=',')
vegan_features = pd.read_csv(
get_data_path('vare_rda_species_from_vegan.csv'))
vegan_samples = pd.read_csv(
get_data_path('vare_rda_sites_from_vegan.csv'))
vegan_biplot = pd.read_csv(
get_data_path('vare_rda_biplot_from_vegan.csv'))
vegan_constraints = pd.read_csv(
get_data_path('vare_rda_constraints_from_vegan.csv'))
vegan_propexpl = pd.read_csv(
get_data_path('vare_rda_propexpl_from_vegan.csv'))
vegan_propexpl = pd.Series(
vegan_propexpl.x.values, index=rda_.eigvals.index)
vegan_eigvals = pd.read_csv(
get_data_path('vare_rda_eigvals_from_vegan.csv'))
vegan_eigvals = pd.Series(
vegan_eigvals.x.values, index=rda_.eigvals.index)
# scikit-bio returns singular values, whereas vegan returns eigenvalues
vegan_eigvals = np.sqrt(vegan_eigvals*vegan_eigvals.shape[0])
vegan_propexpl = vegan_eigvals/vegan_eigvals.sum()
# transform the output of rda_ to match column selection of vegan
res_samples = rda_.samples.iloc[:, 0:6]
res_features = rda_.features.iloc[:, 0:6]
rda_ = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=res_samples,
features=res_features,
sample_constraints=rda_.sample_constraints.iloc[:, 0:6],
biplot_scores=rda_.biplot_scores.iloc[:, 0:6],
proportion_explained=rda_.proportion_explained,
eigvals=rda_.eigvals)
exp = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=vegan_samples,
features=vegan_features,
sample_constraints=vegan_constraints,
biplot_scores=vegan_biplot,
proportion_explained=vegan_propexpl,
eigvals=vegan_eigvals)
# This scaling constant is required to make skbio comparable to vegan.
scaling = (rda_.eigvals[0] / rda_.eigvals[:6])
exp.biplot_scores *= scaling
assert_ordination_results_equal(
rda_, exp,
ignore_directionality=True,
decimal=6)
if __name__ == '__main__':
main()
|
gregcaporaso/scikit-bio
|
skbio/stats/ordination/tests/test_redundancy_analysis.py
|
Python
|
bsd-3-clause
| 10,422
|
[
"scikit-bio"
] |
13d456d23a1799537d057a9bf500f79cf5fb22bd2de2014101e52cc04c3bc537
|
#! /usr/bin/env python3
"""
retrain_emission.py: take an HDF5 file and segmentations, and output parameters of a mixture model.
"""
# std lib:
import argparse
import os
import sys
import random
from collections import defaultdict
from tqdm import tqdm
# numerics:
import numpy as np
import h5py
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
def pool_reads(h, K):
"""
Select (up to) K random segmented reads from the dataset `h`. Return as a dictionary of pooled scaled samples
of form { <REGION_NAME> :: str -> <SAMPLES> :: NDArray(float) }.
"""
# collect scaled samples for each state:
pool = defaultdict(list)
rnames = random.sample(list(h['scaled'].keys()), min(K, len(h['scaled'])))
for rid in tqdm(rnames):
try:
assert(len(h['scaled'][rid]) == len(h['states'][rid]))
for k in range(len(h['states'][rid])):
pool[ h['states'][rid][k] ].append( h['scaled'][rid][k] )
except (KeyError, AssertionError):
pass
# process into a dict of numpy arrays and return:
pool = dict(pool)
for k, v in pool.items():
pool[k] = np.array(v)
return pool
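# Hedged illustration (state names are hypothetical): pool_reads(h, K=50)
# returns something like {'POLYA': ndarray(shape=(n1,)), 'TRANSCRIPT': ...},
# i.e. one flat array of scaled samples per segmentation state.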
def retrain_emission(hdf_path, nreads, bayesian, components, verbose):
"""Retrain gaussian mixture model from parameters."""
# load dataset:
hdf = h5py.File(hdf_path, 'r')
assert ('states' in hdf.keys() and 'scaled' in hdf.keys()), \
"[retrain_emission.py] ERR: both `scaled` and `states` must be groups in the HDF5."
# select up to `nreads` random segmented reads from the dataset:
print("[retrain_emission.py] Collecting and pooling {} random reads (this may take a while...)".format(nreads))
segments = pool_reads(hdf, nreads)
# compute GMM parameters for each segment:
CONFIG = {
'ncomp': components,
'niter': 100,
'ninit': 5,
'verbose': (1 if verbose else 0),
'bayesian': bayesian
}
print("----- TRAINING CONFIG -----")
for k,v in CONFIG.items():
print("* {0} = {1}".format(k,v))
gmm = {}
for k,v in segments.items():
if v.shape[0] < 10:
print("[retrain_emissions.py] Fewer than 10 samples for state {}; skipping...".format(k))
pass
# train GMM:
if CONFIG['bayesian']:
gmm[k] = BayesianGaussianMixture(
n_components=CONFIG['ncomp'], max_iter=CONFIG['niter'], n_init=CONFIG['ninit'],
verbose=CONFIG['verbose']).fit(v.reshape(-1,1))
else:
gmm[k] = GaussianMixture(
n_components=CONFIG['ncomp'], max_iter=CONFIG['niter'], n_init=CONFIG['ninit'],
verbose=CONFIG['verbose']).fit(v.reshape(-1,1))
# print mixture model properties for each segment:
for k,v in gmm.items():
print("===== [{}] =====".format(k))
print("* Weights: {}".format(v.weights_))
print("* Means: {}".format(v.means_))
print("* Covariances: {}".format(v.covariances_))
hdf.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train a mixture model.")
parser.add_argument("hdf_path",
help="Path to HDF5 file with segmented signal paths.")
parser.add_argument("--nreads", default=50, type=int,
help="Number of random reads to pool together and retrain upon. [50]")
parser.add_argument("--bayesian", default=False, action='store_true',
help="Use a dirichlet process mixture model. [False]")
parser.add_argument("--verbose", default=False, action='store_true',
help="Print verbose outputs during training. [False]")
parser.add_argument("--components", default=2, type=int,
help="If DPMM, max components; else fixed number of GMM components. [2]")
args = parser.parse_args()
assert (os.path.exists(args.hdf_path)), "File does not exist: {}".format(args.hdf_path)
retrain_emission(args.hdf_path, args.nreads, args.bayesian, args.components, args.verbose)
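# Hedged invocation sketch (the input path is an assumption):
#     python3 retrain_emission.py segmented_reads.hdf5 --nreads 100 --components 2
# prints per-state mixture weights, means, and covariances as trained above.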
|
jts/nanopolish
|
scripts/polya_training/retrain_emission.py
|
Python
|
mit
| 4,076
|
[
"Gaussian"
] |
2aacc36906ebd6e0ff06bfeac99f52a5995c509dfb69f35f06e57c7681d7b111
|
#from opengmcore import _opengmcore.adder as adder
from opengmcore import *
from __version__ import version
from functionhelper import *
from _inf_param import _MetaInfParam , InfParam
from _visu import visualizeGm
from _misc import defaultAccumulator
import time
from _inference_interface_generator import _inject_interface , InferenceBase
import inference
import hdf5
import benchmark
# initialize solver/ inference dictionaries
_solverDicts=[
(inference.adder.minimizer.solver.__dict__ , 'adder', 'minimizer' ),
(inference.adder.maximizer.solver.__dict__, 'adder', 'maximizer' ),
(inference.multiplier.integrator.solver.__dict__,'adder', 'integrator'),
(inference.multiplier.minimizer.solver.__dict__, 'multiplier', 'minimizer' ),
(inference.multiplier.maximizer.solver.__dict__, 'multiplier', 'maximizer' ),
(inference.multiplier.integrator.solver.__dict__,'multiplier', 'integrator')
]
for infClass,infName in _inject_interface(_solverDicts):
inference.__dict__[infName]=infClass
class Timer:
def __init__(self, name=None , verbose = True):
self.name = name
self.verbose = verbose
def __enter__(self):
if self.name and self.verbose:
print '[%s]' % self.name
self.tstart = time.time()
return self
def __exit__(self, type, value, traceback):
#if self.name:
# print '[%s]' % self.name,
self.elapsed = time.time() - self.tstart
if self.verbose:
print ' Elapsed: %s' % (time.time() - self.tstart)
def weightRandomizer(noiseType = 'normalAdd', noiseParam=1.0, seed=42, ignoreSeed = True):
p = inference.adder.minimizer.solver._WeightRandomizerParameter_()
ntenum = inference.adder.minimizer.solver._WeightRandomization_NoiseType_
if noiseType == 'none' or noiseType =='noNoise':
nt =ntenum.none
elif noiseType == 'normalAdd':
nt =ntenum.normalAdd
elif noiseType == 'normalMult':
nt =ntenum.normalMult
elif noiseType == 'uniformAdd':
nt =ntenum.uniformAdd
else:
raise RuntimeError("unknown noise type")
p.noiseType = nt
p.noiseParam = float(noiseParam)
p.seed = int(seed)
p.ignoreSeed = bool(ignoreSeed)
return p
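# Hedged usage sketch (uses only names defined above):
#     p = weightRandomizer(noiseType='uniformAdd', noiseParam=0.5, seed=7,
#                          ignoreSeed=False)
# yields a parameter object for solvers that accept weight randomization.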
def saveGm(gm, f, d='gm'):
""" save a graphical model to a hdf5 file:
Args:
gm : graphical model to save
f : filepath
g : dataset (defaut : 'gm')
"""
hdf5.saveGraphicalModel(gm, f, d)
def loadGm(f, d='gm', operator='adder'):
""" save a graphical model to a hdf5 file:
Args:
f : filepath
g : dataset (defaut : 'gm')
operator : operator of the graphical model ('adder' / 'multiplier')
"""
if(operator=='adder'):
gm=adder.GraphicalModel()
elif(operator=='multiplier'):
gm=multiplier.GraphicalModel()
else:
raise RuntimeError("unknown operator: "+ operator)
hdf5.loadGraphicalModel(gm,f,d)
return gm
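# Hedged round-trip sketch (the file path is an assumption):
#     saveGm(gm, '/tmp/model.h5', d='gm')
#     gm2 = loadGm('/tmp/model.h5', d='gm', operator='adder')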
class TestModels(object):
@staticmethod
def chain3(nVar,nLabels):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-2):
f=numpy.random.rand(nLabels,nLabels,nLabels)
model.addFactor(model.addFunction(f),[x0,x0+1,x0+2])
return model
@staticmethod
def chain4(nVar,nLabels):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-3):
f=numpy.random.rand(nLabels,nLabels,nLabels,nLabels)
model.addFactor(model.addFunction(f),[x0,x0+1,x0+2,x0+3])
return model
@staticmethod
def chainN(nVar,nLabels,order,nSpecialUnaries=0,beta=1.0):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
for sn in range(nSpecialUnaries):
r=int(numpy.random.rand(1)*nVar-1)
rl=int(numpy.random.rand(1)*nLabels-1)
unaries[r,rl]=0.0
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-(order-1)):
f=numpy.random.rand( *([nLabels]*order))
f*=beta
vis=numpy.arange(order)
vis+=x0
model.addFactor(model.addFunction(f),vis)
return model
@staticmethod
def secondOrderGrid(dx,dy,nLabels):
nVar=dx*dy
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
vis2Order=secondOrderGridVis(dx,dy,True)
nF2=len(vis2Order)#.shape[0]
f2s=numpy.random.rand(nF2,nLabels)
model.addFactors(model.addFunctions(f2s),vis2Order)
return model
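# Hedged usage sketch (factory methods defined above): a small benchmark
# model can be built with e.g. gm = TestModels.chainN(nVar=20, nLabels=3,
# order=3, beta=0.5) or gm = TestModels.secondOrderGrid(8, 8, 2).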
class GenericTimingVisitor(object):
def __init__(self,visitNth=1,reserve=0,verbose=True,multiline=True):
self.visitNth=visitNth
self.reserve=reserve
self.verbose=verbose
self.multiline=multiline
self.values_ = None
self.runtimes_ = None
self.bounds_ = None
self.iterations_ = None
self.t0 = None
self.t1 = None
self.iterNr = 0
def getValues(self):
return numpy.require(self.values_,dtype=value_type)
def getTimes(self):
return numpy.require(self.runtimes_,dtype=value_type)
def getBounds(self):
return numpy.require(self.bounds_,dtype=value_type)
def getIterations(self):
return numpy.require(self.iterations_,dtype=value_type)
def begin(self,inf):
v = inf.value()
b = inf.bound()
self.values_ =[v]
self.bounds_ =[b]
self.runtimes_ =[0.0]
self.iterations_=[self.iterNr]
if self.verbose :
print 'Begin : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# start the timing
self.t0 =time.time()
self.t1 =time.time()
def visit(self,inf):
if(self.iterNr==0 or self.iterNr%self.visitNth==0):
# "stop the timing"
self.t1=time.time()
# get the runtime of the run
rt=self.t1-self.t0
v = inf.value()
b = inf.bound()
if self.verbose :
print 'Step : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# store results
self.values_.append(v)
self.bounds_.append(b)
self.runtimes_.append(rt)
self.iterations_.append(self.iterNr)
# increment iteration number
self.iterNr+=1
# restart the timing
self.t0=time.time()
else:
# increment iteration number
self.iterNr+=1
def end(self,inf):
# "stop the timing"
self.t1=time.time()
# get the runtime of the run
rt=self.t1-self.t0
v = inf.value()
b = inf.bound()
if self.verbose :
print 'End : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# store results
self.values_.append(v)
self.bounds_.append(b)
self.runtimes_.append(rt)
self.iterations_.append(self.iterNr)
class __RandomFusion__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.steps = kwargs.get('steps', 100)
self.fusionSolver = kwargs.get('fusionSolver', 'lf2')
self.arg_ = None
self.value_ = None
self.fusionMover=inference.adder.minimizer.FusionMover(self.gm_)
self.nLabels = self.gm_.numberOfLabels(0)
self.nVar = self.gm_.numberOfVariables
def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True):
return GenericTimingVisitor(visitNth,reserve,verbose,multiline)
def setStartingPoint(self,arg):
self.arg_=arg
self.value_=self.gm_.evaluate(self.arg_)
def infer(self,visitor=None):
if(self.arg_ is None):
self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type)
self.value_ = self.gm_.evaluate(self.arg_)
# start inference
if visitor is not None:
visitor.begin(self)
# start fusion moves
for x in range(self.steps):
randState=numpy.random.randint(low=0, high=self.nLabels, size=self.nVar).astype(label_type)
r = self.fusionMover.fuse(self.arg_,randState,self.fusionSolver)
self.arg_=r[0]
self.value_=r[1]
if visitor is not None:
visitor.visit(self)
# end inference
if visitor is not None:
visitor.end(self)
def name(self):
return "RandomFusion"
def bound(self):
return -1.0*float('inf')
def arg(self):
return self.arg_
def value(self):
return self.value_
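# Hedged usage sketch (this class is registered below as
# inference.RandomFusion; `gm` is assumed to be an adder/minimizer model,
# e.g. one built by TestModels):
#     inf = inference.RandomFusion(gm, parameter=InfParam(steps=50))
#     v = inf.timingVisitor(visitNth=5, verbose=False)
#     inf.infer(v)  # v.getValues() / v.getTimes() then hold the trace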
class __CheapInitialization__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.arg_ = None
self.value_ = None
self.initType = kwargs.get('initType', 'localOpt')
def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True):
return GenericTimingVisitor(visitNth,reserve,verbose,multiline)
def setStartingPoint(self,arg):
self.arg_=arg
self.value_=self.gm_.evaluate(self.arg_)
def infer(self,visitor=None):
if(self.arg_ is None):
self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type)
self.value_ = self.gm_.evaluate(self.arg_)
# start inference
if visitor is not None:
visitor.begin(self)
if(self.initType=='localOpt'):
print "move local opt"
self.arg_ = self.gm_.moveLocalOpt('minimizer')
print "done"
if visitor is not None:
visitor.visit(self)
# end inference
if visitor is not None:
visitor.end(self)
def name(self):
return "CheapInitialization"
def bound(self):
return -1.0*float('inf')
def arg(self):
return self.arg_
def value(self):
return self.value_
inference.__dict__['CheapInitialization']=__CheapInitialization__
inference.__dict__['RandomFusion']=__RandomFusion__
if __name__ == "__main__":
pass
|
CVML/opengm
|
src/interfaces/python/opengm/__init__.py
|
Python
|
mit
| 10,778
|
[
"VisIt"
] |
a4515fd23686cecde28cbd8cd837daf6d3adef97023673815c73670c67ca94df
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkHierarchicalDataExtractDataSets(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkHierarchicalDataExtractDataSets(), 'Processing.',
('vtkMultiGroupDataSet',), ('vtkMultiGroupDataSet',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkHierarchicalDataExtractDataSets.py
|
Python
|
bsd-3-clause
| 541
|
[
"VTK"
] |
df6aaaa7cdab94705d83ec89d966db2c835dc55599c17844bfc6c91b98f1a384
|
"""Make sure all the describe features are putting the right features in the
right place
"""
import mdtraj as md
import numpy as np
import pandas as pd
from mdtraj.testing import eq
from scipy.stats import vonmises as vm
from msmbuilder.example_datasets import MinimalFsPeptide
from msmbuilder.feature_selection import FeatureSelector
from msmbuilder.featurizer import DihedralFeaturizer, AlphaAngleFeaturizer, \
KappaAngleFeaturizer, ContactFeaturizer, VonMisesFeaturizer
trajectories = MinimalFsPeptide().get_cached().trajectories
top = trajectories[0].topology
if np.random.choice([True, False]):
atom_ind = [i.index for i in top.atoms
if i.residue.is_protein
and (i.residue.index in range(15)
or i.residue.index in range(20, 23))]
else:
atom_ind = [i.index for i in top.atoms]
def test_DihedralFeaturizer_describe_features():
feat = DihedralFeaturizer()
rnd_traj = np.random.randint(len(trajectories))
features = feat.transform([trajectories[rnd_traj]])
df = pd.DataFrame(feat.describe_features(trajectories[rnd_traj]))
for f in range(25):
f_index = np.random.choice(len(df))
atom_inds = df.iloc[f_index].atominds
feature_value = md.compute_dihedrals(trajectories[rnd_traj],
[atom_inds])
if feat.sincos:
func = getattr(np, '%s' % df.iloc[f_index].otherinfo)
feature_value = func(feature_value)
assert (features[0][:, f_index] == feature_value.flatten()).all()
def test_DihedralFeaturizer_describe_features_nosincos():
feat = DihedralFeaturizer(sincos=False)
rnd_traj = np.random.randint(len(trajectories))
features = feat.transform([trajectories[rnd_traj]])
df = pd.DataFrame(feat.describe_features(trajectories[rnd_traj]))
for f in range(25):
f_index = np.random.choice(len(df))
atom_inds = df.iloc[f_index].atominds
feature_value = md.compute_dihedrals(trajectories[rnd_traj],
[atom_inds])
if feat.sincos:
func = getattr(np, '%s' % df.iloc[f_index].otherinfo)
feature_value = func(feature_value)
assert (features[0][:, f_index] == feature_value.flatten()).all()
def test_AlphaFeaturizer_describe_features():
feat = AlphaAngleFeaturizer()
rnd_traj = np.random.randint(len(trajectories))
features = feat.transform([trajectories[rnd_traj]])
df = pd.DataFrame(feat.describe_features(trajectories[rnd_traj]))
for f in range(25):
f_index = np.random.choice(len(df))
atom_inds = df.iloc[f_index].atominds
feature_value = md.compute_dihedrals(trajectories[rnd_traj],
[atom_inds])
if feat.sincos:
func = getattr(np, '%s' % df.iloc[f_index].otherinfo)
feature_value = func(feature_value)
assert (features[0][:, f_index] == feature_value.flatten()).all()
def test_AlphaFeaturizer_describe_features_nosincos():
feat = AlphaAngleFeaturizer(sincos=False)
rnd_traj = np.random.randint(len(trajectories))
features = feat.transform([trajectories[rnd_traj]])
df = pd.DataFrame(feat.describe_features(trajectories[rnd_traj]))
for f in range(25):
f_index = np.random.choice(len(df))
atom_inds = df.iloc[f_index].atominds
feature_value = md.compute_dihedrals(trajectories[rnd_traj],
[atom_inds])
if feat.sincos:
func = getattr(np, '%s' % df.iloc[f_index].otherinfo)
feature_value = func(feature_value)
assert (features[0][:, f_index] == feature_value.flatten()).all()
def test_KappaFeaturizer_describe_features():
feat = KappaAngleFeaturizer()
rnd_traj = np.random.randint(len(trajectories))
features = feat.transform([trajectories[rnd_traj]])
df = pd.DataFrame(feat.describe_features(trajectories[rnd_traj]))
for f in range(25):
f_index = np.random.choice(len(df))
atom_inds = df.iloc[f_index].atominds
feature_value = md.compute_angles(trajectories[rnd_traj], [atom_inds])
if feat.cos:
func = getattr(np, '%s' % df.iloc[f_index].otherinfo)
feature_value = func(feature_value)
assert (features[0][:, f_index] == feature_value.flatten()).all()
def test_VonMisesFeaturizer_describe_features():
feat = VonMisesFeaturizer()
rnd_traj = np.random.randint(len(trajectories))
features = feat.transform([trajectories[rnd_traj]])
df = pd.DataFrame(feat.describe_features(trajectories[rnd_traj]))
for f in range(25):
f_index = np.random.choice(len(df))
atom_inds = df.iloc[f_index].atominds
bin_index = int(df.iloc[f_index].otherinfo.strip('bin-'))
dihedral_value = md.compute_dihedrals(trajectories[rnd_traj],
[atom_inds])
feature_value = [vm.pdf(i, loc=feat.loc, kappa=feat.kappa)[bin_index]
for i in dihedral_value]
assert (features[0][:, f_index] == feature_value).all()
def test_ContactFeaturizer_describe_features():
feat = ContactFeaturizer(scheme='CA', ignore_nonprotein=True)
rnd_traj = np.random.randint(len(trajectories))
features = feat.transform([trajectories[rnd_traj]])
df = pd.DataFrame(feat.describe_features(trajectories[rnd_traj]))
for f in range(25):
f_index = np.random.choice(len(df))
residue_ind = df.iloc[f_index].resids
feature_value, _ = md.compute_contacts(trajectories[rnd_traj],
contacts=[residue_ind],
scheme='ca',
ignore_nonprotein=True, )
assert (features[0][:, f_index] == feature_value.flatten()).all()
def test_FeatureSelector_describe_features():
rnd_traj = np.random.randint(len(trajectories))
f_ca = ContactFeaturizer(scheme='CA', ignore_nonprotein=True)
f1 = f_ca.transform([trajectories[rnd_traj]])
df1 = pd.DataFrame(f_ca.describe_features(trajectories[rnd_traj]))
f_dih = DihedralFeaturizer()
f2 = f_dih.transform([trajectories[rnd_traj]])
df2 = pd.DataFrame(f_dih.describe_features(trajectories[rnd_traj]))
df_dict = {}
df_dict["ca"] = df1
df_dict["dih"] = df2
f_comb = FeatureSelector([('ca', f_ca), ('dih', f_dih)])
f3 = f_comb.transform([trajectories[rnd_traj]])
df3 = pd.DataFrame(f_comb.describe_features(trajectories[rnd_traj]))
assert len(df3) == len(df1) + len(df2)
df4 = pd.concat([df_dict[i] for i in f_comb.feat_list])
# lets randomly compare 40 features
for i in np.random.choice(range(len(df3)), 40):
for j in df3.columns:
assert eq(df3.iloc[i][j], df4.iloc[i][j])
|
dr-nate/msmbuilder
|
msmbuilder/tests/test_feature_descriptor.py
|
Python
|
lgpl-2.1
| 6,930
|
[
"MDTraj"
] |
b26e0049ffae1d9720dec586ac2b8a441a01397e80bbd4cb5ec0a59bc20dfacf
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 23 10:15:35 2018
@author: galad-loth
"""
import mxnet as mx
import logging
import sys
from hash_net import get_ssdh_symbol
from evaluate_metric import MyAccuracy
from data import get_img_class_iter
root_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(stdout_handler)
root_logger.setLevel(logging.INFO)
def train_ssdh():
pretrain_model=(r'D:\Pretrained\mxnet\Inception-BN')
load_net, load_arg_params, load_aux_params = \
mx.model.load_checkpoint(pretrain_model, 126)
new_net, load_args=get_ssdh_symbol(load_net,load_arg_params,512,45)
batch_size=10
datadir=r"D:\Jilan_Work\DevProj\_Datasets\NWPU-RESISC45\images"
trainIter,valIter, cls_dict=get_img_class_iter(datadir,(batch_size,3,256,256),True,0.4)
model = mx.mod.Module(symbol= new_net, context= mx.gpu())
optimizer = mx.optimizer.create('sgd',
rescale_grad=1.0/batch_size,
learning_rate =0.01,
momentum = 0.9,
wd = 0.0005,
lr_scheduler=mx.lr_scheduler.FactorScheduler(250,0.9))
new_net_args=new_net.list_arguments()
lr_scale={}
for arg_name in new_net_args:
if "ssdh" in arg_name:
lr_scale[arg_name] = 10
optimizer.set_lr_mult(lr_scale)
initializer = mx.init.Xavier(rnd_type='gaussian',
factor_type="in",
magnitude=2)
model_prefix="checkpoint\\ssdh"
checkpoint = mx.callback.do_checkpoint(model_prefix)
eval_metric=MyAccuracy()
model.fit(trainIter,
begin_epoch=0,
num_epoch=2,
eval_data=valIter,
eval_metric=eval_metric,
optimizer=optimizer,
initializer=initializer,
arg_params= load_args,
aux_params= load_aux_params,
batch_end_callback = mx.callback.Speedometer(batch_size, 5),
allow_missing = True,
epoch_end_callback=checkpoint)
if __name__=="__main__":
train_ssdh()
# test_deep_compare()
|
galad-loth/LearnDescriptor
|
deephash/train_model.py
|
Python
|
apache-2.0
| 2,364
|
[
"Gaussian"
] |
c8ac29f8f9504e330774d3b2830766aaee02bc9de72f7b9e59516d971bd53682
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`detrender.py` - De-trending models
-------------------------------------------
This module contains the generic models used to de-trend light curves for
the various supported missions. Most of the functionality is implemented in
:py:class:`Detrender`, and specific de-trending methods are implemented as
subclasses. The default :py:obj:`everest` model is :py:class:`nPLD`.
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from . import missions
from .basecamp import Basecamp
from .config import EVEREST_DAT
from .utils import InitLog, Formatter, AP_SATURATED_PIXEL, AP_COLLAPSED_PIXEL
from .mathutils import Chunks, Scatter, SavGol, Interpolate
from .fits import MakeFITS
from .gp import GetCovariance, GetKernelParams, GP
from .dvs import DVS, CBV
import os
import sys
import numpy as np
import george
from scipy.optimize import fmin_powell
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
from matplotlib.backends.backend_pdf import PdfPages
from PyPDF2 import PdfFileReader, PdfFileWriter
import traceback
import logging
log = logging.getLogger(__name__)
__all__ = ['Detrender', 'rPLD', 'nPLD', 'iPLD', 'pPLD']
class Detrender(Basecamp):
'''
A generic *PLD* model with scalar matrix *L2* regularization. Includes
functionality for loading pixel-level light curves, identifying outliers,
generating the data covariance matrix, computing the regularized pixel
model, and plotting the results.
Specific models are implemented as subclasses.
**General:**
:param ID: The target star ID (*EPIC*, *KIC*, or *TIC* number, \
for instance)
:param str cadence: The cadence of the observations. Default :py:obj:`lc`
:param bool clobber: Overwrite existing :py:obj:`everest` models? Default \
:py:obj:`False`
:param bool clobber_tpf: Download and overwrite the saved raw TPF data? \
Default :py:obj:`False`
:param bool debug: De-trend in debug mode? If :py:obj:`True`, prints all \
output to screen and enters :py:obj:`pdb` post-mortem mode for \
debugging when an error is raised. Default :py:obj:`False`
:param str mission: The name of the mission. Default `k2`
**Detrender:**
:param str aperture_name: The name of the aperture to use. These are \
defined in the datasets and are mission specific. Defaults to \
the mission default
:param int bpad: When light curve breakpoints are set, the light curve \
chunks must be stitched together at the end. To prevent kinks \
and/or discontinuities, the chunks are made to overlap by \
:py:obj:`bpad` cadences on either end. The chunks are then \
mended and the overlap is discarded. Default 100
:param breakpoints: Add light curve breakpoints when de-trending? If \
:py:obj:`True`, splits the light curve into chunks and de-trends \
each one separately, then stitches them back and the end. This is \
useful for missions like *K2*, where the light curve noise \
properties are very different at the beginning and end of each \
campaign. The cadences at which breakpoints are inserted are \
specified in the :py:func:`Breakpoints` function \
of each mission. Alternatively, the user may specify a list of \
cadences at which to break up the light curve. Default \
:py:obj:`True`
:param int cbv_num: The number of CBVs to regress on during \
post-processing. Default 1
:param int cbv_niter: The number of :py:obj:`SysRem` iterations to \
perform when computing CBVs. Default 50
:param int cbv_win: The filter window size (in cadences) for smoothing \
the CBVs. Default 999
:param int cbv_order: The filter order for smoothing CBVs. Default 3
:param int cdivs: The number of light curve subdivisions when \
cross-validating. During each iteration, one of these subdivisions \
will be masked and used as the validation set. Default 3
:param str cv_min: The quantity to be minimized during cross-validation. \
Default `MAD` (median absolute deviation). Can also be set to \
`TV` (total variation).
:param int giter: The number of iterations when optimizing the GP. \
During each iteration, the minimizer is initialized with a \
perturbed guess; after :py:obj:`giter` iterations, the step with \
the highest likelihood is kept. Default 3
:param int gmaxf: The maximum number of function evaluations when \
optimizing the GP. Default 200
:param float gp_factor: When computing the initial kernel parameters, \
the red noise amplitude is set to the standard deviation of the \
data times this factor. Larger values generally help with \
convergence, particularly for very variable stars. Default 100
:param array_like kernel_params: The initial value of the \
:py:obj:`Matern-3/2` kernel parameters \
(white noise amplitude in flux units, red noise amplitude in \
flux units, and timescale in days). Default :py:obj:`None` \
(determined from the data)
:param bool get_hires: Download a high resolution image of the target? \
Default :py:obj:`True`
:param bool get_nearby: Retrieve the location of nearby sources? \
Default :py:obj:`True`
:param array_like lambda_arr: The array of :math:`\Lambda` values to \
iterate over during the cross-validation step. :math:`\Lambda` \
is the regularization parameter, or the standard deviation of \
the Gaussian prior on the weights for each order of PLD. \
Default ``10 ** np.arange(0,18,0.5)``
:param float leps: The fractional tolerance when optimizing \
:math:`\Lambda`. The chosen value of :math:`\Lambda` will be \
within this amount of the minimum of the CDPP curve. \
Default 0.05
:param int max_pixels: The maximum number of pixels. Very large apertures \
are likely to cause memory errors, particularly for high order \
PLD. If the chosen aperture exceeds this many \
pixels, a different aperture is chosen from the dataset. If no \
apertures with fewer than this many pixels are available, an error \
is thrown. Default 75
:param bool optimize_gp: Perform the GP optimization steps? \
Default :py:obj:`True`
:param float osigma: The outlier standard deviation threshold. Default 5
:param int oiter: The maximum number of steps taken during iterative \
sigma clipping. Default 10
:param planets: Any transiting planets/EBs that should be explicitly \
masked during cross-validation. It is not \
usually necessary to specify these at the cross-validation stage, \
since deep transits are masked as outliers and shallow transits \
do not affect the lambda optimization. However, it *is* necessary \
to mask deep transits in short cadence mode, since these can \
heavily bias the cross-validation scheme to lower values of \
lambda, leading to severe underfitting. \
This parameter should be a tuple or a list of tuples in the \
form (`t0`, `period`, `duration`) \
for each of the planets to be masked (all values in days).
:param int pld_order: The pixel level decorrelation order. Default `3`. \
Higher orders may cause memory errors
:param str saturated_aperture_name: If the target is found to be \
saturated, de-trending is performed \
on this aperture instead. Defaults to the mission default
:param float saturation_tolerance: The tolerance when determining whether \
or not to collapse a column in the aperture. The column collapsing \
is implemented in the individual mission modules. Default -0.1, \
i.e., if a target is 10% shy of the nominal saturation level, it \
is considered to be saturated.
:param transit_model: An instance or list of instances of \
:py:class:`everest.transit.TransitModel`. If specified, \
:py:obj:`everest` will include these in the regression when \
calculating the PLD coefficients. The final instrumental light \
curve model will **not** include the transit fits -- they are used \
solely to obtain unbiased PLD coefficients. The best fit transit \
depths from the fit are stored \
in the :py:obj:`transit_depth` attribute of the model. \
Default :py:obj:`None`.
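A minimal usage sketch (the target ID below is made up; any
:py:class:`Detrender` subclass, such as :py:class:`rPLD` or
:py:class:`nPLD`, is instantiated the same way)::

    # De-trend a hypothetical K2 target, masking one known planet
    # given as a (t0, period, duration) tuple (all values in days)
    model = nPLD(201367065, mission='k2', pld_order=3,
                 planets=[(1980.42, 3.505, 0.25)])
    print(model.cdpp)  # the de-trended precision (CDPP) in ppm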
'''
def __init__(self, ID, **kwargs):
'''
'''
# Store the target ID and season
self.ID = ID
if kwargs.get('season', None) is not None:
self._season = kwargs.get('season')
if hasattr(self._season, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s."
% self._season)
self._data = kwargs.get('data', None)
self.cadence = kwargs.get('cadence', 'lc').lower()
if self.cadence not in ['lc', 'sc']:
raise ValueError("Invalid cadence selected.")
self.mission = kwargs.get('mission', 'k2')
self.clobber = kwargs.get('clobber', False)
self.debug = kwargs.get('debug', False)
self.is_parent = kwargs.get('is_parent', False)
if not self.is_parent:
screen_level = kwargs.get('screen_level', logging.CRITICAL)
log_level = kwargs.get('log_level', logging.DEBUG)
InitLog(self.logfile, log_level, screen_level, self.debug)
log.info("Initializing %s model for %d." % (self.name, self.ID))
# If this is a short cadence light curve, get the
# GP params from the long cadence model. It would
# take way too long and too much memory to optimize
# the GP based on the short cadence light curve
if self.cadence == 'sc':
kernel_params = kwargs.get('kernel_params', None)
if kernel_params is None:
log.info("Loading long cadence model...")
kwcpy = dict(kwargs)
kwcpy.pop('cadence', None)
kwcpy.pop('clobber', None)
lc = self.__class__(ID, is_parent=True, **kwcpy)
kernel_params = np.array(lc.kernel_params)
del lc
kwargs.update(
{'kernel_params': kernel_params, 'optimize_gp': False})
# Read general model kwargs
self.lambda_arr = kwargs.get('lambda_arr', 10 ** np.arange(0, 18, 0.5))
if self.lambda_arr[0] != 0:
self.lambda_arr = np.append(0, self.lambda_arr)
self.leps = kwargs.get('leps', 0.05)
self.osigma = kwargs.get('osigma', 5)
self.oiter = kwargs.get('oiter', 10)
self.cdivs = kwargs.get('cdivs', 3)
self.giter = kwargs.get('giter', 3)
self.gmaxf = kwargs.get('gmaxf', 200)
self.optimize_gp = kwargs.get('optimize_gp', True)
self.kernel_params = kwargs.get('kernel_params', None)
self.kernel = kwargs.get('kernel', 'Basic')
assert self.kernel in ['Basic', 'QuasiPeriodic'], \
"Kwarg `kernel` must be one of `Basic` or `QuasiPeriodic`."
self.clobber_tpf = kwargs.get('clobber_tpf', False)
self.bpad = kwargs.get('bpad', 100)
self.aperture_name = kwargs.get('aperture', None)
self.saturated_aperture_name = kwargs.get('saturated_aperture', None)
self.max_pixels = kwargs.get('max_pixels', 75)
self.saturation_tolerance = kwargs.get('saturation_tolerance', -0.1)
self.gp_factor = kwargs.get('gp_factor', 100.)
self.get_hires = kwargs.get('get_hires', True)
self.get_nearby = kwargs.get('get_nearby', True)
self.planets = kwargs.get('planets', [])
if type(self.planets) is tuple and len(self.planets) == 3 and \
not hasattr(self.planets[0], '__len__'):
self.planets = [self.planets]
for planet in self.planets:
assert len(planet) == 3, \
"Planets must be provided as (`t0`, `per`, `dur`) tuples."
# Handle breakpointing. The breakpoint is the *last* index of each
# light curve chunk.
bkpts = kwargs.get('breakpoints', True)
if bkpts is True:
self.breakpoints = np.append(self._mission.Breakpoints(
self.ID, season=self.season, cadence=self.cadence), [999999])
elif hasattr(bkpts, '__len__'):
self.breakpoints = np.append(bkpts, [999999])
else:
self.breakpoints = np.array([999999])
nseg = len(self.breakpoints)
self.cv_min = kwargs.get('cv_min', 'mad').lower()
assert self.cv_min in ['mad', 'tv'], "Invalid value for `cv_min`."
self.cbv_num = kwargs.get('cbv_num', 1)
self.cbv_niter = kwargs.get('cbv_niter', 50)
self.cbv_win = kwargs.get('cbv_win', 999)
self.cbv_order = kwargs.get('cbv_order', 3)
# Get the pld order
pld_order = kwargs.get('pld_order', 3)
assert (pld_order > 0), "Invalid value for the de-trending order."
self.pld_order = pld_order
# Get the transit model
self._transit_model = kwargs.get('transit_model', None)
# Initialize model params
self.lam_idx = -1
self.lam = [[1e5] + [None for i in range(self.pld_order - 1)]
for b in range(nseg)]
self.reclam = None
self.recmask = []
self.X1N = None
self.XCBV = None
self.cdpp_arr = np.array([np.nan for b in range(nseg)])
self.cdppr_arr = np.array([np.nan for b in range(nseg)])
self.cdppv_arr = np.array([np.nan for b in range(nseg)])
self.cdpp = np.nan
self.cdppr = np.nan
self.cdppv = np.nan
self.cdppg = np.nan
self.neighbors = []
self.loaded = False
self._weights = None
# Initialize plotting
self.dvs = DVS(len(self.breakpoints), pld_order=self.pld_order)
# Check for saved model
if self.load_model():
return
# Setup (subclass-specific)
self.setup(**kwargs)
# Run
self.run()
@property
def name(self):
'''
Returns the name of the current :py:class:`Detrender` subclass.
'''
if self.cadence == 'lc':
return self.__class__.__name__
else:
return '%s.sc' % self.__class__.__name__
@name.setter
def name(self, value):
'''
The model name cannot be set directly.
'''
raise NotImplementedError("Can't set this property.")
def setup(self, **kwargs):
'''
A subclass-specific routine.
'''
pass
def cv_precompute(self, mask, b):
'''
Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
(cross-validation step only)
for chunk :py:obj:`b`.
'''
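# In the PLD regression, A[n] = X2 . X2^T is the covariance of the
# order-n regressors on the training set (validation cadences removed),
# B[n] = X1 . X2^T maps the training-set weights onto the full chunk,
# and C (below) adds the optional transit-model covariance.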
# Get current chunk and mask outliers
m1 = self.get_masked_chunk(b)
flux = self.fraw[m1]
K = GetCovariance(self.kernel, self.kernel_params,
self.time[m1], self.fraw_err[m1])
med = np.nanmedian(flux)
# Now mask the validation set
M = lambda x, axis = 0: np.delete(x, mask, axis=axis)
m2 = M(m1)
mK = M(M(K, axis=0), axis=1)
f = M(flux) - med
# Pre-compute the matrices
A = [None for i in range(self.pld_order)]
B = [None for i in range(self.pld_order)]
for n in range(self.pld_order):
# Only compute up to the current PLD order
if self.lam_idx >= n:
X2 = self.X(n, m2)
X1 = self.X(n, m1)
A[n] = np.dot(X2, X2.T)
B[n] = np.dot(X1, X2.T)
del X1, X2
if self.transit_model is None:
C = 0
else:
C = np.zeros((len(m2), len(m2)))
mean_transit_model = med * \
np.sum([tm.depth * tm(self.time[m2])
for tm in self.transit_model], axis=0)
f -= mean_transit_model
for tm in self.transit_model:
X2 = tm(self.time[m2]).reshape(-1, 1)
C += tm.var_depth * np.dot(X2, X2.T)
del X2
return A, B, C, mK, f, m1, m2
def cv_compute(self, b, A, B, C, mK, f, m1, m2):
'''
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
'''
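# Sum the per-order covariances, each weighted by its regularization
# parameter lambda, then solve (mK + A + C) W = f for the weights W
# and evaluate the model on the full chunk.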
A = np.sum([l * a for l, a in zip(self.lam[b], A)
if l is not None], axis=0)
B = np.sum([l * b for l, b in zip(self.lam[b], B)
if l is not None], axis=0)
W = np.linalg.solve(mK + A + C, f)
if self.transit_model is None:
model = np.dot(B, W)
else:
w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W)
for n, l in enumerate(self.lam[b])
if l is not None])
model = np.dot(np.hstack(
[self.X(n, m1) for n, l in enumerate(self.lam[b])
if l is not None]), w_pld)
model -= np.nanmedian(model)
return model
def get_outliers(self):
'''
Performs iterative sigma clipping to get outliers.
'''
log.info("Clipping outliers...")
log.info('Iter %d/%d: %d outliers' %
(0, self.oiter, len(self.outmask)))
def M(x): return np.delete(x, np.concatenate(
[self.nanmask, self.badmask, self.transitmask]), axis=0)
t = M(self.time)
outmask = [np.array([-1]), np.array(self.outmask)]
# Loop as long as the last two outlier arrays aren't equal
while not np.array_equal(outmask[-2], outmask[-1]):
# Check if we've done this too many times
if len(outmask) - 1 > self.oiter:
log.error('Maximum number of iterations in ' +
'``get_outliers()`` exceeded. Skipping...')
break
# Check if we're going in circles
if np.any([np.array_equal(outmask[-1], i) for i in outmask[:-1]]):
log.error('Function ``get_outliers()`` ' +
'is going in circles. Skipping...')
break
# Compute the model to get the flux
self.compute()
# Get the outliers
f = SavGol(M(self.flux))
med = np.nanmedian(f)
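# 1.4826 * MAD is a robust estimate of the standard deviation
# for Gaussian scatter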
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
inds = np.where((f > med + self.osigma * MAD) |
(f < med - self.osigma * MAD))[0]
# Project onto unmasked time array
inds = np.array([np.argmax(self.time == t[i]) for i in inds])
self.outmask = np.array(inds, dtype=int)
# Add them to the running list
outmask.append(np.array(inds))
# Log
log.info('Iter %d/%d: %d outliers' %
(len(outmask) - 2, self.oiter, len(self.outmask)))
def optimize_lambda(self, validation):
'''
Returns the index into :py:attr:`self.lambda_arr` that minimizes the
validation scatter: the largest of the per-segment minima, capped so
that the scatter in every segment stays within fractional tolerance
:py:attr:`self.leps` of that segment's own minimum.
:param numpy.ndarray validation: The scatter in the validation set \
as a function of :py:obj:`lambda`
'''
maxm = 0
minr = len(validation)
for n in range(validation.shape[1]):
# The index that minimizes the scatter for this segment
m = np.nanargmin(validation[:, n])
if m > maxm:
# The largest of the `m`s.
maxm = m
# The largest index with validation scatter within
# `self.leps` of the minimum for this segment
r = np.where((validation[:, n] - validation[m, n]) /
validation[m, n] <= self.leps)[0][-1]
if r < minr:
# The smallest of the `r`s
minr = r
return min(maxm, minr)
def fobj(self, y, y0, t, gp, mask):
'''
The cross-validation objective: returns the scatter in the validation
set, either as a MAD-based precision in ppm or as the total variation,
depending on :py:attr:`self.cv_min`.
'''
if self.cv_min == 'mad':
# Note that we're computing the MAD, not the
# standard deviation, as this handles extremely variable
# stars much better!
gpm, _ = gp.predict(y - y0, t[mask])
fdet = (y[mask] - gpm) / y0
scatter = 1.e6 * \
(1.4826 * np.nanmedian(np.abs(fdet - np.nanmedian(fdet))) /
np.sqrt(len(mask)))
return scatter
elif self.cv_min == 'tv':
# We're going to minimize the total variation instead
return 1.e6 * np.sum(np.abs(np.diff(y[mask]))) / len(mask) / y0
def cross_validate(self, ax, info=''):
'''
Cross-validate to find the optimal value of :py:obj:`lambda`.
:param ax: The current :py:obj:`matplotlib.pyplot` axis instance to \
plot the cross-validation results.
:param str info: The label to show in the bottom right-hand corner \
of the plot. Default `''`
'''
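# Scheme: split each chunk into `cdivs` contiguous sections; for each
# section, fit the weights with that section masked, score the
# prediction on it over the whole `lambda` grid, and then pick the
# optimal `lambda` via `optimize_lambda`.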
# Loop over all chunks
ax = np.atleast_1d(ax)
for b, brkpt in enumerate(self.breakpoints):
log.info("Cross-validating chunk %d/%d..." %
(b + 1, len(self.breakpoints)))
med_training = np.zeros_like(self.lambda_arr)
med_validation = np.zeros_like(self.lambda_arr)
# Mask for current chunk
m = self.get_masked_chunk(b)
# Check that we have enough data
if len(m) < 3 * self.cdivs:
self.cdppv_arr[b] = np.nan
self.lam[b][self.lam_idx] = 0.
log.info(
"Insufficient data to run cross-validation on this chunk.")
continue
# Mask transits and outliers
time = self.time[m]
flux = self.fraw[m]
ferr = self.fraw_err[m]
med = np.nanmedian(flux)
# The precision in the validation set
validation = [[] for k, _ in enumerate(self.lambda_arr)]
# The precision in the training set
training = [[] for k, _ in enumerate(self.lambda_arr)]
# Setup the GP
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(time, ferr)
# The masks
masks = list(Chunks(np.arange(0, len(time)),
len(time) // self.cdivs))
# Loop over the different masks
for i, mask in enumerate(masks):
log.info("Section %d/%d..." % (i + 1, len(masks)))
# Pre-compute (training set)
pre_t = self.cv_precompute([], b)
# Pre-compute (validation set)
pre_v = self.cv_precompute(mask, b)
# Iterate over lambda
for k, lam in enumerate(self.lambda_arr):
# Update the lambda matrix
self.lam[b][self.lam_idx] = lam
# Training set
model = self.cv_compute(b, *pre_t)
training[k].append(
self.fobj(flux - model, med, time, gp, mask))
# Validation set
model = self.cv_compute(b, *pre_v)
validation[k].append(
self.fobj(flux - model, med, time, gp, mask))
# Finalize
training = np.array(training)
validation = np.array(validation)
for k, _ in enumerate(self.lambda_arr):
# Take the mean
med_validation[k] = np.nanmean(validation[k])
med_training[k] = np.nanmean(training[k])
# Compute best model
i = self.optimize_lambda(validation)
v_best = med_validation[i]
t_best = med_training[i]
self.cdppv_arr[b] = v_best / t_best
self.lam[b][self.lam_idx] = self.lambda_arr[i]
log.info("Found optimum solution at log(lambda) = %.1f." %
np.log10(self.lam[b][self.lam_idx]))
# Plotting: There's not enough space in the DVS to show the
# cross-val results for more than three light curve segments.
if len(self.breakpoints) <= 3:
# Plotting hack: first x tick will be -infty
lambda_arr = np.array(self.lambda_arr)
lambda_arr[0] = 10 ** (np.log10(lambda_arr[1]) - 3)
# Plot cross-val
for n in range(len(masks)):
ax[b].plot(np.log10(lambda_arr),
validation[:, n], 'r-', alpha=0.3)
ax[b].plot(np.log10(lambda_arr),
med_training, 'b-', lw=1., alpha=1)
ax[b].plot(np.log10(lambda_arr),
med_validation, 'r-', lw=1., alpha=1)
ax[b].axvline(np.log10(self.lam[b][self.lam_idx]),
color='k', ls='--', lw=0.75, alpha=0.75)
ax[b].axhline(v_best, color='k', ls='--', lw=0.75, alpha=0.75)
ax[b].set_ylabel(r'Scatter (ppm)', fontsize=5)
hi = np.max(validation[0])
lo = np.min(training)
rng = (hi - lo)
ax[b].set_ylim(lo - 0.15 * rng, hi + 0.15 * rng)
if rng > 2:
ax[b].get_yaxis().set_major_formatter(Formatter.CDPP)
ax[b].get_yaxis().set_major_locator(
MaxNLocator(4, integer=True))
elif rng > 0.2:
ax[b].get_yaxis().set_major_formatter(Formatter.CDPP1F)
ax[b].get_yaxis().set_major_locator(MaxNLocator(4))
else:
ax[b].get_yaxis().set_major_formatter(Formatter.CDPP2F)
ax[b].get_yaxis().set_major_locator(MaxNLocator(4))
# Fix the x ticks
xticks = [np.log10(lambda_arr[0])] + list(np.linspace(
np.log10(lambda_arr[1]), np.log10(lambda_arr[-1]), 6))
ax[b].set_xticks(xticks)
ax[b].set_xticklabels(['' for x in xticks])
pad = 0.01 * \
(np.log10(lambda_arr[-1]) - np.log10(lambda_arr[0]))
ax[b].set_xlim(np.log10(lambda_arr[0]) - pad,
np.log10(lambda_arr[-1]) + pad)
ax[b].annotate('%s.%d' % (info, b), xy=(0.02, 0.025),
xycoords='axes fraction',
ha='left', va='bottom', fontsize=7, alpha=0.25,
fontweight='bold')
# Finally, compute the model
self.compute()
# Tidy up
if len(ax) == 2:
ax[0].xaxis.set_ticks_position('top')
for axis in ax[1:]:
axis.spines['top'].set_visible(False)
axis.xaxis.set_ticks_position('bottom')
if len(self.breakpoints) <= 3:
# A hack to mark the first xtick as -infty
labels = ['%.1f' % x for x in xticks]
labels[0] = r'$-\infty$'
ax[-1].set_xticklabels(labels)
ax[-1].set_xlabel(r'Log $\Lambda$', fontsize=5)
else:
# We're just going to plot lambda as a function of chunk number
bs = np.arange(len(self.breakpoints))
ax[0].plot(bs + 1, [np.log10(self.lam[b][self.lam_idx])
for b in bs], 'r.')
ax[0].plot(bs + 1, [np.log10(self.lam[b][self.lam_idx])
for b in bs], 'r-', alpha=0.25)
ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5)
ax[0].margins(0.1, 0.1)
ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1))
ax[0].set_xticklabels([])
# Now plot the CDPP and approximate validation CDPP
cdpp_arr = self.get_cdpp_arr()
cdppv_arr = self.cdppv_arr * cdpp_arr
ax[1].plot(bs + 1, cdpp_arr, 'b.')
ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25)
ax[1].plot(bs + 1, cdppv_arr, 'r.')
ax[1].plot(bs + 1, cdppv_arr, 'r-', alpha=0.25)
ax[1].margins(0.1, 0.1)
ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5)
ax[1].set_xlabel(r'Chunk', fontsize=5)
if len(self.breakpoints) < 15:
ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1))
else:
ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1, 2))
def finalize(self):
'''
This method is called at the end of the de-trending, prior to
plotting the final results.
Subclass it to add custom functionality to individual models.
'''
pass
def get_ylim(self):
'''
Computes the ideal y-axis limits for the light curve plot. Attempts to
set the limits equal to those of the raw light curve, but if more than
1% of the flux lies either above or below these limits, auto-expands
to include those points. At the end, adds 5% padding to both the
top and the bottom.
'''
bn = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
fraw = np.delete(self.fraw, bn)
lo, hi = fraw[np.argsort(fraw)][[3, -3]]
flux = np.delete(self.flux, bn)
fsort = flux[np.argsort(flux)]
if fsort[int(0.01 * len(fsort))] < lo:
lo = fsort[int(0.01 * len(fsort))]
if fsort[int(0.99 * len(fsort))] > hi:
hi = fsort[int(0.99 * len(fsort))]
pad = (hi - lo) * 0.05
ylim = (lo - pad, hi + pad)
return ylim
def plot_lc(self, ax, info_left='', info_right='', color='b'):
'''
Plots the current light curve. This is called at several stages to
plot the de-trending progress as a function of the different
*PLD* orders.
:param ax: The current :py:obj:`matplotlib.pyplot` axis instance
:param str info_left: Information to display at the left of the \
plot. Default `''`
:param str info_right: Information to display at the right of the \
plot. Default `''`
:param str color: The color of the data points. Default `'b'`
'''
# Plot
if (self.cadence == 'lc') or (len(self.time) < 4000):
ax.plot(self.apply_mask(self.time), self.apply_mask(self.flux),
ls='none', marker='.', color=color,
markersize=2, alpha=0.5)
ax.plot(self.time[self.transitmask], self.flux[self.transitmask],
ls='none', marker='.', color=color,
markersize=2, alpha=0.5)
else:
ax.plot(self.apply_mask(self.time), self.apply_mask(
self.flux), ls='none', marker='.', color=color,
markersize=2, alpha=0.03, zorder=-1)
ax.plot(self.time[self.transitmask], self.flux[self.transitmask],
ls='none', marker='.', color=color,
markersize=2, alpha=0.03, zorder=-1)
ax.set_rasterization_zorder(0)
ylim = self.get_ylim()
# Plot the outliers, but not the NaNs
badmask = [i for i in self.badmask if i not in self.nanmask]
def O1(x): return x[self.outmask]
def O2(x): return x[badmask]
if self.cadence == 'lc':
ax.plot(O1(self.time), O1(self.flux), ls='none',
color="#777777", marker='.', markersize=2, alpha=0.5)
ax.plot(O2(self.time), O2(self.flux),
'r.', markersize=2, alpha=0.25)
else:
ax.plot(O1(self.time), O1(self.flux), ls='none', color="#777777",
marker='.', markersize=2, alpha=0.25, zorder=-1)
ax.plot(O2(self.time), O2(self.flux), 'r.',
markersize=2, alpha=0.125, zorder=-1)
for i in np.where(self.flux < ylim[0])[0]:
if i in badmask:
color = "#ffcccc"
elif i in self.outmask:
color = "#cccccc"
elif i in self.nanmask:
continue
else:
color = "#ccccff"
ax.annotate('', xy=(self.time[i], ylim[0]), xycoords='data',
xytext=(0, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color))
for i in np.where(self.flux > ylim[1])[0]:
if i in badmask:
color = "#ffcccc"
elif i in self.outmask:
color = "#cccccc"
elif i in self.nanmask:
continue
else:
color = "#ccccff"
ax.annotate('', xy=(self.time[i], ylim[1]), xycoords='data',
xytext=(0, -15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color))
# Plot the breakpoints
for brkpt in self.breakpoints[:-1]:
if len(self.breakpoints) <= 5:
ax.axvline(self.time[brkpt], color='r', ls='--', alpha=0.5)
else:
ax.axvline(self.time[brkpt], color='r', ls='-', alpha=0.025)
# Appearance
if len(self.cdpp_arr) == 2:
ax.annotate('%.2f ppm' % self.cdpp_arr[0], xy=(0.02, 0.975),
xycoords='axes fraction',
ha='left', va='top', fontsize=10)
ax.annotate('%.2f ppm' % self.cdpp_arr[1], xy=(0.98, 0.975),
xycoords='axes fraction',
ha='right', va='top', fontsize=10)
elif len(self.cdpp_arr) < 6:
for n in range(len(self.cdpp_arr)):
if n > 0:
x = (self.time[self.breakpoints[n - 1]] - self.time[0]
) / (self.time[-1] - self.time[0]) + 0.02
else:
x = 0.02
ax.annotate('%.2f ppm' % self.cdpp_arr[n], xy=(x, 0.975),
xycoords='axes fraction',
ha='left', va='top', fontsize=8)
else:
ax.annotate('%.2f ppm' % self.cdpp, xy=(0.02, 0.975),
xycoords='axes fraction',
ha='left', va='top', fontsize=10)
ax.annotate(info_right, xy=(0.98, 0.025), xycoords='axes fraction',
ha='right', va='bottom', fontsize=10, alpha=0.5,
fontweight='bold')
ax.annotate(info_left, xy=(0.02, 0.025), xycoords='axes fraction',
ha='left', va='bottom', fontsize=8)
ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=5)
ax.margins(0.01, 0.1)
ax.set_ylim(*ylim)
ax.get_yaxis().set_major_formatter(Formatter.Flux)
def plot_final(self, ax):
'''
Plots the final de-trended light curve.
'''
# Plot the light curve
bnmask = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
def M(x): return np.delete(x, bnmask)
if (self.cadence == 'lc') or (len(self.time) < 4000):
ax.plot(M(self.time), M(self.flux), ls='none',
marker='.', color='k', markersize=2, alpha=0.3)
else:
ax.plot(M(self.time), M(self.flux), ls='none', marker='.',
color='k', markersize=2, alpha=0.03, zorder=-1)
ax.set_rasterization_zorder(0)
# Hack: Plot invisible first and last points to ensure
# the x axis limits are the
# same in the other plots, where we also plot outliers!
ax.plot(self.time[0], np.nanmedian(M(self.flux)), marker='.', alpha=0)
ax.plot(self.time[-1], np.nanmedian(M(self.flux)), marker='.', alpha=0)
# Plot the GP (long cadence only)
if self.cadence == 'lc':
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(self.apply_mask(self.time),
self.apply_mask(self.fraw_err))
med = np.nanmedian(self.apply_mask(self.flux))
y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
y += med
ax.plot(M(self.time), M(y), 'r-', lw=0.5, alpha=0.5)
# Compute the CDPP of the GP-detrended flux
self.cdppg = self._mission.CDPP(self.apply_mask(
self.flux - y + med), cadence=self.cadence)
else:
# We're not going to calculate this
self.cdppg = 0.
# Appearance
ax.annotate('Final', xy=(0.98, 0.025), xycoords='axes fraction',
ha='right', va='bottom', fontsize=10, alpha=0.5,
fontweight='bold')
ax.margins(0.01, 0.1)
# Get y lims that bound 99% of the flux
flux = np.delete(self.flux, bnmask)
N = int(0.995 * len(flux))
hi, lo = flux[np.argsort(flux)][[N, -N]]
fsort = flux[np.argsort(flux)]
pad = (hi - lo) * 0.1
ylim = (lo - pad, hi + pad)
ax.set_ylim(ylim)
ax.get_yaxis().set_major_formatter(Formatter.Flux)
def plot_cbv(self, ax, flux, info, show_cbv=False):
'''
Plots the final CBV-corrected light curve.
'''
# Plot the light curve
bnmask = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
def M(x): return np.delete(x, bnmask)
if self.cadence == 'lc':
ax.plot(M(self.time), M(flux), ls='none', marker='.',
color='k', markersize=2, alpha=0.45)
else:
ax.plot(M(self.time), M(flux), ls='none', marker='.',
color='k', markersize=2, alpha=0.03, zorder=-1)
ax.set_rasterization_zorder(0)
# Hack: Plot invisible first and last points to ensure
# the x axis limits are the
# same in the other plots, where we also plot outliers!
ax.plot(self.time[0], np.nanmedian(M(flux)), marker='.', alpha=0)
ax.plot(self.time[-1], np.nanmedian(M(flux)), marker='.', alpha=0)
# Show CBV fit?
if show_cbv:
ax.plot(self.time, self._mission.FitCBVs(
self) + np.nanmedian(flux), 'r-', alpha=0.2)
# Appearance
ax.annotate(info, xy=(0.98, 0.025), xycoords='axes fraction',
ha='right', va='bottom', fontsize=10, alpha=0.5,
fontweight='bold')
ax.margins(0.01, 0.1)
# Get y lims that bound 99% of the flux
flux = np.delete(flux, bnmask)
N = int(0.995 * len(flux))
hi, lo = flux[np.argsort(flux)][[N, -N]]
fsort = flux[np.argsort(flux)]
pad = (hi - lo) * 0.2
ylim = (lo - pad, hi + pad)
ax.set_ylim(ylim)
ax.get_yaxis().set_major_formatter(Formatter.Flux)
ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=9)
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(7)
def load_tpf(self):
'''
Loads the target pixel file.
'''
if not self.loaded:
if self._data is not None:
data = self._data
else:
data = self._mission.GetData(
self.ID, season=self.season,
cadence=self.cadence,
clobber=self.clobber_tpf,
aperture_name=self.aperture_name,
saturated_aperture_name=self.saturated_aperture_name,
max_pixels=self.max_pixels,
saturation_tolerance=self.saturation_tolerance,
get_hires=self.get_hires,
get_nearby=self.get_nearby)
if data is None:
raise Exception("Unable to retrieve target data.")
self.cadn = data.cadn
self.time = data.time
self.model = np.zeros_like(self.time)
self.fpix = data.fpix
self.fraw = np.sum(self.fpix, axis=1)
self.fpix_err = data.fpix_err
self.fraw_err = np.sqrt(np.sum(self.fpix_err ** 2, axis=1))
self.nanmask = data.nanmask
self.badmask = data.badmask
self.transitmask = np.array([], dtype=int)
self.outmask = np.array([], dtype=int)
self.aperture = data.aperture
self.aperture_name = data.aperture_name
self.apertures = data.apertures
self.quality = data.quality
self.Xpos = data.Xpos
self.Ypos = data.Ypos
self.mag = data.mag
self.pixel_images = data.pixel_images
self.nearby = data.nearby
self.hires = data.hires
self.saturated = data.saturated
self.meta = data.meta
self.bkg = data.bkg
# Update the last breakpoint to the correct value
self.breakpoints[-1] = len(self.time) - 1
# Get PLD normalization
self.get_norm()
self.loaded = True
def load_model(self, name=None):
'''
Loads a saved version of the model.
'''
if self.clobber:
return False
if name is None:
name = self.name
file = os.path.join(self.dir, '%s.npz' % name)
if os.path.exists(file):
if not self.is_parent:
log.info("Loading '%s.npz'..." % name)
try:
data = np.load(file)
for key in data.keys():
try:
setattr(self, key, data[key][()])
except NotImplementedError:
pass
# HACK: Backwards compatibility. Previous version stored
# the CDPP in the `cdpp6`
# and `cdpp6_arr` attributes. Let's move them over.
if hasattr(self, 'cdpp6'):
self.cdpp = self.cdpp6
del self.cdpp6
if hasattr(self, 'cdpp6_arr'):
self.cdpp_arr = np.array(self.cdpp6_arr)
del self.cdpp6_arr
if hasattr(self, 'gppp'):
self.cdppg = self.gppp
del self.gppp
# HACK: At one point we were saving the figure instances,
# so loading the .npz
# opened a plotting window. I don't think this is the case
# any more, so this
# next line should be removed in the future...
pl.close()
return True
except:
log.warn("Error loading '%s.npz'." % name)
exctype, value, tb = sys.exc_info()
for line in traceback.format_exception_only(exctype, value):
ln = line.replace('\n', '')
log.warn(ln)
os.rename(file, file + '.bad')
if self.is_parent:
raise Exception(
'Unable to load `%s` model for target %d.'
% (self.name, self.ID))
return False
def save_model(self):
'''
Saves all of the de-trending information to disk in an `npz` file
and saves the DVS as a `pdf`.
'''
# Save the data
log.info("Saving data to '%s.npz'..." % self.name)
d = dict(self.__dict__)
d.pop('_weights', None)
d.pop('_A', None)
d.pop('_B', None)
d.pop('_f', None)
d.pop('_mK', None)
d.pop('K', None)
d.pop('dvs', None)
d.pop('clobber', None)
d.pop('clobber_tpf', None)
d.pop('_mission', None)
d.pop('debug', None)
d.pop('transit_model', None)
d.pop('_transit_model', None)
np.savez(os.path.join(self.dir, self.name + '.npz'), **d)
# Save the DVS
pdf = PdfPages(os.path.join(self.dir, self.name + '.pdf'))
pdf.savefig(self.dvs.fig)
pl.close(self.dvs.fig)
d = pdf.infodict()
d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
self.name, self._mission.IDSTRING, self.ID)
d['Author'] = 'Rodrigo Luger'
pdf.close()
def exception_handler(self, pdb):
'''
A custom exception handler.
:param pdb: If :py:obj:`True`, enters PDB post-mortem \
mode for debugging.
'''
# Grab the exception
exctype, value, tb = sys.exc_info()
# Log the error and create a .err file
errfile = os.path.join(self.dir, self.name + '.err')
with open(errfile, 'w') as f:
for line in traceback.format_exception_only(exctype, value):
ln = line.replace('\n', '')
log.error(ln)
print(ln, file=f)
for line in traceback.format_tb(tb):
ln = line.replace('\n', '')
log.error(ln)
print(ln, file=f)
# Re-raise?
if pdb:
raise
def update_gp(self):
'''
Calls :py:func:`gp.GetKernelParams` to optimize the GP and obtain the
covariance matrix for the regression.
'''
self.kernel_params = GetKernelParams(self.time, self.flux,
self.fraw_err,
mask=self.mask,
guess=self.kernel_params,
kernel=self.kernel,
giter=self.giter,
gmaxf=self.gmaxf)
def init_kernel(self):
'''
Initializes the covariance matrix with a guess at
the GP kernel parameters.
'''
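# Guess the parameters from a first-order PLD fit of the flux: white
# noise from the median scatter in 13-cadence chunks of the residuals,
# red noise amplitude from their standard deviation times `gp_factor`,
# and (for the Basic kernel) a fiducial 30-day timescale.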
if self.kernel_params is None:
X = self.apply_mask(self.fpix / self.flux.reshape(-1, 1))
y = self.apply_mask(self.flux) - np.dot(X, np.linalg.solve(
np.dot(X.T, X), np.dot(X.T, self.apply_mask(self.flux))))
white = np.nanmedian([np.nanstd(c) for c in Chunks(y, 13)])
amp = self.gp_factor * np.nanstd(y)
tau = 30.0
if self.kernel == 'Basic':
self.kernel_params = [white, amp, tau]
elif self.kernel == 'QuasiPeriodic':
self.kernel_params = [white, amp, 1., 20.]
def mask_planets(self):
'''
Masks the transits of all planets specified in :py:attr:`self.planets`,
provided as (`t0`, `period`, `duration`) tuples (all values in days).
'''
for i, planet in enumerate(self.planets):
log.info('Masking planet #%d...' % (i + 1))
t0, period, dur = planet
mask = []
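# Shift t0 forward to the first transit that can overlap the start of
# the light curve, then mask +/- half a duration around each transit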
t0 += np.ceil((self.time[0] - dur - t0) / period) * period
for t in np.arange(t0, self.time[-1] + dur, period):
mask.extend(np.where(np.abs(self.time - t) < dur / 2.)[0])
self.transitmask = np.array(
list(set(np.concatenate([self.transitmask, mask]))))
def run(self):
'''
Runs the de-trending step.
'''
try:
# Load raw data
log.info("Loading target data...")
self.load_tpf()
self.mask_planets()
self.plot_aperture([self.dvs.top_right() for i in range(4)])
self.init_kernel()
M = self.apply_mask(np.arange(len(self.time)))
self.cdppr_arr = self.get_cdpp_arr()
self.cdpp_arr = np.array(self.cdppr_arr)
self.cdppv_arr = np.array(self.cdppr_arr)
self.cdppr = self.get_cdpp()
self.cdpp = self.cdppr
self.cdppv = self.cdppr
log.info("%s (Raw): CDPP = %s" % (self.name, self.cdpps))
self.plot_lc(self.dvs.left(), info_right='Raw', color='k')
# Loop
for n in range(self.pld_order):
self.lam_idx += 1
self.get_outliers()
if n > 0 and self.optimize_gp:
self.update_gp()
self.cross_validate(self.dvs.right(), info='CV%d' % n)
self.cdpp_arr = self.get_cdpp_arr()
self.cdppv_arr *= self.cdpp_arr
self.cdpp = self.get_cdpp()
self.cdppv = np.nanmean(self.cdppv_arr)
log.info("%s (%d/%d): CDPP = %s" %
(self.name, n + 1, self.pld_order, self.cdpps))
self.plot_lc(self.dvs.left(), info_right='LC%d' % (
n + 1), info_left='%d outliers' % len(self.outmask))
# Save
self.finalize()
self.plot_final(self.dvs.top_left())
self.plot_info(self.dvs)
self.save_model()
except:
self.exception_handler(self.debug)
def publish(self, **kwargs):
'''
Correct the light curve with the CBVs, generate a
cover page for the DVS figure,
and produce a FITS file for publication.
'''
try:
# HACK: Force these params for publication
self.cbv_win = 999
self.cbv_order = 3
self.cbv_num = 1
# Get the CBVs
self._mission.GetTargetCBVs(self)
# Plot the final corrected light curve
cbv = CBV()
self.plot_info(cbv)
self.plot_cbv(cbv.body(), self.fcor, 'Corrected')
self.plot_cbv(cbv.body(), self.flux, 'De-trended', show_cbv=True)
self.plot_cbv(cbv.body(), self.fraw, 'Raw')
# Save the CBV pdf
pdf = PdfPages(os.path.join(self.dir, 'cbv.pdf'))
pdf.savefig(cbv.fig)
pl.close(cbv.fig)
d = pdf.infodict()
d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
self.name, self._mission.IDSTRING, self.ID)
d['Author'] = 'Rodrigo Luger'
pdf.close()
# Now merge the two PDFs
assert os.path.exists(os.path.join(
self.dir, self.name + '.pdf')), \
"Unable to locate %s.pdf." % self.name
output = PdfFileWriter()
pdfOne = PdfFileReader(os.path.join(self.dir, 'cbv.pdf'))
pdfTwo = PdfFileReader(os.path.join(self.dir, self.name + '.pdf'))
# Add the CBV page
output.addPage(pdfOne.getPage(0))
# Add the original DVS page
output.addPage(pdfTwo.getPage(pdfTwo.numPages - 1))
# Write the final PDF
outputStream = open(os.path.join(self.dir, self._mission.DVSFile(
self.ID, self.season, self.cadence)), "wb")
output.write(outputStream)
outputStream.close()
os.remove(os.path.join(self.dir, 'cbv.pdf'))
# Make the FITS file
MakeFITS(self)
except:
self.exception_handler(self.debug)
def publish_csv(self, **kwargs):
'''
Corrects the light curve with the CBVs and saves the raw, de-trended,
and corrected fluxes, along with the outlier mask, to a CSV file.
'''
try:
# HACK: Force these params for publication
self.cbv_win = 999
self.cbv_order = 3
self.cbv_num = 1
# Get the CBVs
self._mission.GetTargetCBVs(self)
# Write to file!
outfile = os.path.join(self.dir, self._mission.CSVFile(self.ID))
header = self._mission.CSVHEADER % self.ID
mask = np.zeros_like(self.cadn)
for i in range(len(mask)):
if i in self.nanmask:
mask[i] = 1
elif i in self.badmask:
mask[i] = 2
elif i in self.outmask:
mask[i] = 3
data = np.vstack([self.time, self.cadn, self.fcor,
self.flux, self.fraw, mask]).T
np.savetxt(outfile, data,
fmt='%.6f,%d,%.6f,%.6f,%.6f,%d', header=header)
except:
self.exception_handler(self.debug)
class rPLD(Detrender):
'''
The regular PLD model. Nothing fancy.
'''
pass
class nPLD(Detrender):
'''
The "neighboring stars" *PLD* model. This model uses the
*PLD* vectors of neighboring stars to help in the de-trending and can lead
to increased performance over the regular :py:class:`rPLD` model,
particularly for dimmer stars.
'''
def setup(self, **kwargs):
'''
This is called during production de-trending, prior to
calling the :py:obj:`Detrender.run()` method.
:param tuple cdpp_range: If :py:obj:`parent_model` is set, \
neighbors are selected only if \
their de-trended CDPPs fall within this range. Default `None`
:param tuple mag_range: Only select neighbors whose magnitudes are \
within this range. Default (11., 13.)
:param int neighbors: The number of neighboring stars to use in \
the de-trending. The higher this number, the more signals \
there are and hence the more de-trending information there is. \
However, the neighboring star signals are regularized together \
with the target's signals, so adding too many neighbors will \
inevitably reduce the contribution of the target's own \
signals, which may reduce performance. Default `10`
:param str parent_model: By default, :py:class:`nPLD` is run in \
stand-alone mode. The neighbor signals are computed directly \
from their TPFs, so there is no need to have run *PLD* on them \
beforehand. However, if :py:obj:`parent_model` \
is set, :py:class:`nPLD` will use information from the \
:py:obj:`parent_model` model of each neighboring star when \
de-trending. This is particularly useful for identifying \
outliers in the neighbor signals and preventing them from \
polluting the current target. Setting :py:obj:`parent_model` \
to :py:class:`rPLD`, for instance, will use the \
outlier information in the :py:class:`rPLD` model of the \
neighbors (this must have been run ahead of time). \
Note, however, that tests with *K2* data show that including \
outliers in the neighbor signals actually \
*improves* the performance, since many of these outliers \
are associated with events such as thruster firings and are \
present in all light curves, and therefore *help* in the \
de-trending. Default `None`
.. note:: Optionally, the :py:obj:`neighbors` may be specified \
directly as a list of target IDs to use. \
In this case, users may also provide a list of \
:py:class:`everest.utils.DataContainer` instances \
corresponding to each of the neighbors in the \
:py:obj:`neighbors_data` kwarg.
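A hypothetical example of specifying the neighbors manually (the
target IDs below are made up)::

    model = nPLD(201367065, neighbors=[201367066, 201367067])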
'''
# Get neighbors
self.parent_model = kwargs.get('parent_model', None)
neighbors = kwargs.get('neighbors', 10)
neighbors_data = kwargs.get('neighbors_data', None)
if hasattr(neighbors, '__len__'):
self.neighbors = neighbors
else:
num_neighbors = neighbors
self.neighbors = \
self._mission.GetNeighbors(self.ID,
season=self.season,
cadence=self.cadence,
model=self.parent_model,
neighbors=num_neighbors,
mag_range=kwargs.get(
'mag_range', (11., 13.)),
cdpp_range=kwargs.get(
'cdpp_range', None),
aperture_name=self.aperture_name)
if len(self.neighbors):
if len(self.neighbors) < num_neighbors:
log.warn("%d neighbors requested, but only %d found." %
(num_neighbors, len(self.neighbors)))
elif num_neighbors > 0:
log.warn("No neighbors found! Running standard PLD...")
for n, neighbor in enumerate(self.neighbors):
log.info("Loading data for neighboring target %d..." % neighbor)
if neighbors_data is not None:
data = neighbors_data[n]
data.mask = np.array(
list(set(np.concatenate([data.badmask, data.nanmask]))),
dtype=int)
data.fraw = np.sum(data.fpix, axis=1)
elif self.parent_model is not None and self.cadence == 'lc':
# We load the `parent` model. The advantage here is
# that outliers have properly been identified and masked.
# I haven't tested this on short
# cadence data, so I'm going to just forbid it...
data = eval(self.parent_model)(
neighbor, mission=self.mission, is_parent=True)
else:
# We load the data straight from the TPF. Much quicker,
# since no model must be run in advance. Downside is we
# don't know where the outliers are. But based
# on tests with K2 data, the de-trending is actually
# *better* if the outliers are
# included! These are mostly thruster fire events and other
# artifacts common to
# all the stars, so it makes sense that we might want
# to keep them in the design matrix.
data = self._mission.GetData(neighbor, season=self.season,
clobber=self.clobber_tpf,
cadence=self.cadence,
aperture_name=self.aperture_name,
saturated_aperture_name=
self.saturated_aperture_name,
max_pixels=self.max_pixels,
saturation_tolerance=
self.saturation_tolerance,
get_hires=False, get_nearby=False)
if data is None:
raise Exception(
"Unable to retrieve data for neighboring target.")
data.mask = np.array(
list(set(np.concatenate([data.badmask, data.nanmask]))),
dtype=int)
data.fraw = np.sum(data.fpix, axis=1)
# Compute the linear PLD vectors and interpolate over
# outliers, NaNs and bad timestamps
X1 = data.fpix / data.fraw.reshape(-1, 1)
X1 = Interpolate(data.time, data.mask, X1)
if self.X1N is None:
self.X1N = np.array(X1)
else:
self.X1N = np.hstack([self.X1N, X1])
del X1
del data
class iPLD(Detrender):
'''
The iterative PLD model.
.. warning:: Deprecated and not thoroughly tested.
'''
def setup(self, **kwargs):
'''
This is called during production de-trending, prior to
calling the :py:obj:`Detrender.run()` method.
:param str parent_model: The name of the model to operate on. \
Default `nPLD`
'''
# Load the parent model
self.parent_model = kwargs.get('parent_model', 'nPLD')
if not self.load_model(self.parent_model):
raise Exception('Unable to load parent model.')
# Save static copies of the de-trended flux,
# the outlier mask and the lambda array
self._norm = np.array(self.flux)
self.recmask = np.array(self.mask)
self.reclam = np.array(self.lam)
# Now reset the model params
self.optimize_gp = False
nseg = len(self.breakpoints)
self.lam_idx = -1
self.lam = [
[1e5] + [None for i in range(self.pld_order - 1)]
for b in range(nseg)]
self.cdpp_arr = np.array([np.nan for b in range(nseg)])
self.cdppr_arr = np.array([np.nan for b in range(nseg)])
self.cdppv_arr = np.array([np.nan for b in range(nseg)])
self.cdpp = np.nan
self.cdppr = np.nan
self.cdppv = np.nan
self.cdppg = np.nan
self.model = np.zeros_like(self.time)
self.loaded = True
class pPLD(Detrender):
'''
A neighboring PLD extension that uses Powell's method to find the
cross-validation parameter :py:obj:`lambda`.
'''
def setup(self, **kwargs):
'''
This is called during production de-trending, prior to
calling the :py:obj:`Detrender.run()` method.
:param int piter: The number of iterations in the minimizer. \
Default 3
:param int pmaxf: The maximum number of function evaluations per \
iteration. Default 300
:param float ppert: The fractional amplitude of the perturbation on \
the initial guess. Default 0.1
'''
# Check for saved model
clobber = self.clobber
self.clobber = False
if not self.load_model('nPLD'):
raise Exception("Can't find `nPLD` model for target.")
self.clobber = clobber
# Powell iterations
self.piter = kwargs.get('piter', 3)
self.pmaxf = kwargs.get('pmaxf', 300)
self.ppert = kwargs.get('ppert', 0.1)
def run(self):
'''
Runs the de-trending.
'''
try:
# Plot original
self.plot_aperture([self.dvs.top_right() for i in range(4)])
self.plot_lc(self.dvs.left(), info_right='nPLD', color='k')
# Cross-validate
self.cross_validate(self.dvs.right())
self.compute()
self.cdpp_arr = self.get_cdpp_arr()
self.cdpp = self.get_cdpp()
# Plot new
self.plot_lc(self.dvs.left(), info_right='Powell', color='k')
# Save
self.plot_final(self.dvs.top_left())
self.plot_info(self.dvs)
self.save_model()
except:
self.exception_handler(self.debug)
def cross_validate(self, ax):
'''
Performs the cross-validation step.
'''
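# Unlike the grid search in `Detrender.cross_validate`, each chunk's
# full lambda vector is optimized with Powell's method, starting from
# randomly perturbed copies of the saved nPLD solution.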
# The CDPP to beat
cdpp_opt = self.get_cdpp_arr()
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
log.info("Cross-validating chunk %d/%d..." %
(b + 1, len(self.breakpoints)))
# Mask for current chunk
m = self.get_masked_chunk(b)
# Mask transits and outliers
time = self.time[m]
flux = self.fraw[m]
ferr = self.fraw_err[m]
med = np.nanmedian(self.fraw)
# Setup the GP
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(time, ferr)
# The masks
masks = list(Chunks(np.arange(0, len(time)),
len(time) // self.cdivs))
# The pre-computed matrices
pre_v = [self.cv_precompute(mask, b) for mask in masks]
# Initialize with the nPLD solution
log_lam_opt = np.log10(self.lam[b])
scatter_opt = self.validation_scatter(
log_lam_opt, b, masks, pre_v, gp, flux, time, med)
log.info("Iter 0/%d: " % (self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
# Do `piter` iterations
for p in range(self.piter):
# Perturb the initial condition a bit
log_lam = np.array(
np.log10(self.lam[b])) * \
(1 + self.ppert * np.random.randn(len(self.lam[b])))
scatter = self.validation_scatter(
log_lam, b, masks, pre_v, gp, flux, time, med)
log.info("Initializing at: " +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# Call the minimizer
log_lam, scatter, _, _, _, _ = \
fmin_powell(self.validation_scatter, log_lam,
args=(b, masks, pre_v, gp, flux, time, med),
maxfun=self.pmaxf, disp=False,
full_output=True)
# Did it improve the CDPP?
tmp = np.array(self.lam[b])
self.lam[b] = 10 ** log_lam
self.compute()
cdpp = self.get_cdpp_arr()[b]
self.lam[b] = tmp
if cdpp < cdpp_opt[b]:
cdpp_opt[b] = cdpp
log_lam_opt = log_lam
# Log it
log.info("Iter %d/%d: " % (p + 1, self.piter) +
"logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam]), scatter))
# The best solution
log.info("Found minimum: logL = (%s), s = %.3f" %
(", ".join(["%.3f" % l for l in log_lam_opt]),
scatter_opt))
self.lam[b] = 10 ** log_lam_opt
# We're just going to plot lambda as a function of chunk number
bs = np.arange(len(self.breakpoints))
color = ['k', 'b', 'r', 'g', 'y']
for n in range(self.pld_order):
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '.', color=color[n])
ax[0].plot(bs + 1, [np.log10(self.lam[b][n])
for b in bs], '-', color=color[n], alpha=0.25)
ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5)
ax[0].margins(0.1, 0.1)
ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1))
ax[0].set_xticklabels([])
# Now plot the CDPP
cdpp_arr = self.get_cdpp_arr()
ax[1].plot(bs + 1, cdpp_arr, 'b.')
ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25)
ax[1].margins(0.1, 0.1)
ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5)
ax[1].set_xlabel(r'Chunk', fontsize=5)
ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1))
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux,
time, med):
'''
Computes the scatter in the validation set.
'''
# Update the lambda matrix
self.lam[b] = 10 ** log_lam
# Validation set scatter
scatter = [None for i in range(len(masks))]
for i in range(len(masks)):
model = self.cv_compute(b, *pre_v[i])
try:
gpm, _ = gp.predict(flux - model - med, time[masks[i]])
except ValueError:
# Sometimes the model can have NaNs if
# `lambda` is a crazy value
return 1.e30
fdet = (flux - model)[masks[i]] - gpm
scatter[i] = 1.e6 * (1.4826 * np.nanmedian(np.abs(fdet / med -
np.nanmedian(fdet / med))) /
np.sqrt(len(masks[i])))
return np.max(scatter)
| rodluger/everest | everest/detrender.py | Python | mit | 68,576 | ["Gaussian"] | a989af1f8bee67b514837342a20af4d9b2528b6fa69b6919c8fb55f3adbcfa03 |
#!/usr/bin/env python3
# Copyright (C) 2020
# Max Planck Institute for Polymer Research & JGU Mainz
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import espressopp
from espressopp.tools import readxyz
import time
def generate_md(use_vec=True, vec_mode=""):
print('{}USING VECTORIZATION'.format('NOT ' if not use_vec else ''))
if use_vec:
print('MODE={}'.format(vec_mode))
nsteps = 1
isteps = 10
#
# NOTE: For performance comparison increase isteps to 1000
#
rc = 2.5
skin = 0.3
timestep = 0.005
epsilon = 1.0
sigma = 1.0
# ensure deterministic trajectories
temperature = None
xyz_file = "lennard_jones_fluid_10000_2048.xyz"
pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = readxyz(xyz_file)
box = (Lx, Ly, Lz)
num_particles = len(pid)
system, integrator = espressopp.standard_system.Default(box=box, rc=rc, skin=skin, dt=timestep, temperature=temperature)
if use_vec:
vec = espressopp.vectorization.Vectorization(system, integrator, mode=vec_mode)
props = ['id', 'type', 'mass', 'pos', 'v']
new_particles = []
for i in range(num_particles):
part = [i + 1, 0, 1.0, espressopp.Real3D(x[i], y[i], z[i]), espressopp.Real3D(vx[i], vy[i], vz[i])]
new_particles.append(part)
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
# Lennard-Jones with Verlet list
if use_vec:
vl = espressopp.vectorization.VerletList(system, vec, cutoff = rc)
interLJ = espressopp.vectorization.interaction.VerletListLennardJones(vl)
potLJ = espressopp.vectorization.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=rc, shift=0)
else:
vl = espressopp.VerletList(system, cutoff = rc)
interLJ = espressopp.interaction.VerletListLennardJones(vl)
potLJ = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=rc, shift=0)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
system.addInteraction(interLJ)
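# Both branches construct the same LJ potential; only the Verlet-list
# and interaction implementations (vectorized vs. standard) differ, so
# the integrated trajectories should match.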
print('')
print('number of particles = ', num_particles)
print("storage = ", system.storage.__class__.__name__)
print("integrator = ", integrator.__class__.__name__)
print("verletlist = ", ".".join([vl.__class__.__module__,vl.__class__.__name__]))
print("interaction = ", ".".join([interLJ.__class__.__module__,interLJ.__class__.__name__]))
print('')
if hasattr(vl,'resetTimers'):
vl.resetTimers()
if use_vec: vl.rebuildPairs()
espressopp.tools.analyse.info(system, integrator)
start_time = time.process_time()
for k in range(nsteps):
integrator.run(isteps)
if use_vec: vl.rebuildPairs()
espressopp.tools.analyse.info(system, integrator)
end_time = time.process_time()
espressopp.tools.analyse.final_info(system, integrator, vl, start_time, end_time)
# retrieve particle positions after run
configurations = espressopp.analysis.Configurations(system, pos=True, vel=True, force=True)
configurations.gather()
return [configurations[0][i] for i in range(num_particles)]
class TestVectorization(unittest.TestCase):
def test1(self):
''' Ensure that positions after integration are the same for both vec and non-vec versions '''
print('-'*70)
pos0 = generate_md(True,'AOS')
print('-'*70)
pos1 = generate_md(True,'SOA')
print('-'*70)
pos2 = generate_md(False)
print('-'*70)
self.assertEqual(len(pos0), len(pos2))
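# per-particle squared displacements between the vectorized and
# reference trajectories must vanish to 8 decimal places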
diff = [(pos0[i]-pos2[i]).sqr() for i in range(len(pos2))]
for d in diff:
self.assertAlmostEqual(d,0.0,8)
self.assertEqual(len(pos1), len(pos2))
diff = [(pos1[i]-pos2[i]).sqr() for i in range(len(pos1))]
for d in diff:
self.assertAlmostEqual(d,0.0,8)
if __name__ == "__main__":
unittest.main()
| espressopp/espressopp | testsuite/vectorization/test_vectorization.py | Python | gpl-3.0 | 4,635 | ["ESPResSo"] | db1ac0536d861e0a2e80949a0858788b9f1f2493c0291979d59d5033509c9c27 |
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.