| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
#! /usr/bin/env python
from __future__ import print_function # python3-style print
from flask import Flask, render_template, request
import examples.e0 as e0
import examples.e1 as e1
import examples.e2 as e2
import examples.e3 as e3
import examples.e4 as e4
import examples.e5 as e5
import examples.e6 as e6
import examples.e7 as e7
import examples.e8 as e8
import examples.e9 as e9
import examples.e10 as e10
import examples.e11 as e11
import examples.e12 as e12
import examples.e13 as e13
import examples.e14 as e14
import examples.e15 as e15
import examples.t_units as t_units
import examples.t_units_fluid as t_units_fluid
import examples.t_axis as t_axis
import examples.t_distribs as t_distribs
# create application...
app = Flask(__name__)
# turn on debugging for apache instance... to see errors tail /var/log/httpd/error_log
# (this should not be enabled for instances exposed to the outside world)
app.config[ "DEBUG" ] = True
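# For context, a minimal sketch of how an app like this is typically exposed under
# Apache/mod_wsgi, as hinted at by the DEBUG note above (illustrative only; the
# .wsgi filename and install path are assumptions, not part of this repo):
#   # application.wsgi
#   import sys
#   sys.path.insert(0, "/path/to/app")          # hypothetical install location
#   from application import app as application  # mod_wsgi looks for "application"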
@app.route("/")
def welcome():
return render_template("home")
@app.route("/pages/examplist")
def examplist():
return render_template("examplist")
@app.route("/LICENSE.txt")
def license():
return "[Placeholder]", 200, { "Content-Type":"text/plain" }
@app.route("/svgdatashapes.py")
def sds_src():
return "[Placeholder]", 200, { "Content-Type":"text/plain" }
@app.route("/svgdatashapes_dt.py")
def sds_dt_src():
return "[Placeholder]", 200, { "Content-Type":"text/plain" }
@app.route("/pages/e0")
def example0():
svg = e0.example0()
writesvg( svg, "e0" )
return render_template("view1", filename="e0", title="Hello world", svg=svg )
@app.route("/pages/e1")
def example1():
svg = e1.example1()
writesvg( svg, "e1" )
return render_template("view1", filename="e1", title="Example 1", svg=svg )
@app.route("/pages/e2")
def example2():
svg = e2.example2()
writesvg( svg, "e2" )
return render_template("view1", filename="e2", title="Example 2", svg=svg, sdlink=1,
comment="Hover over datapoints to see tooltips" )
@app.route("/pages/e3")
def example3():
svg = e3.example3()
writesvg( svg, "e3" )
return render_template("view1", filename="e3", title="Example 3", svg=svg,
comment="Hover over slices to see tooltips" )
@app.route("/pages/e4")
def example4():
svg = e4.example4()
writesvg( svg, "e4" )
return render_template("view1", filename="e4", title="Example 4", svg=svg )
@app.route("/pages/e5")
def example5():
svg = e5.example5()
writesvg( svg, "e5" )
return render_template("view1", filename="e5", title="Example 5", svg=svg )
@app.route("/pages/e6")
def example6():
svg = e6.example6( )
writesvg( svg, "e6" )
return render_template("view1", filename="e6", title="Example 6", svg=svg,
comment="Hover over datapoints to see tooltips" )
@app.route("/pages/e6a")
def example6a():
svg = e6.example6( dobands=True )
writesvg( svg, "e6a" )
return render_template("view1", filename="e6", title="Example 6a", svg=svg,
comment="In this example dobands=True" )
@app.route("/pages/e7")
def example7():
svg = e7.example7()
writesvg( svg, "e7" )
return render_template("view1", filename="e7", title="Example 7", svg=svg, sdlink=2,
comment="Hover over outlier datapoints for tooltips" )
@app.route("/pages/e7a")
def example7a():
svg = e7.example7( ylog=True )
writesvg( svg, "e7a" )
return render_template("view1", filename="e7", title="Example 7a", svg=svg, sdlink=2,
comment="In this example ylog=True ... hover over outlier datapoints for tooltips" )
@app.route("/pages/e8")
def example8():
svg = e8.example8()
writesvg( svg, "e8" )
return render_template("view1", filename="e8", title="Example 8", svg=svg, sdlink=3,
comment="In this example dispmode='curves'" )
@app.route("/pages/e8a")
def example8a():
svg = e8.example8( dispmode="bars" )
writesvg( svg, "e8a" )
return render_template("view1", filename="e8", title="Example 8a", svg=svg, sdlink=3,
comment="In this example dispmode='bars'" )
@app.route("/pages/e8b")
def example8b():
svg = e8.example8( dispmode="updown" )
writesvg( svg, "e8b" )
return render_template("view1", filename="e8", title="Example 8b", svg=svg, sdlink=3,
comment="In this example dispmode='updown'" )
@app.route("/pages/e9")
def example9():
svg = e9.example9()
writesvg( svg, "e9" )
return render_template("view1", filename="e9", title="Example 9", svg=svg, sdlink=2 )
@app.route("/pages/e10")
def example10():
svg = e10.example10()
writesvg( svg, "e10" )
return render_template("view1", filename="e10", title="Example 10", svg=svg,
comment="Hover over cells for tooltips" )
@app.route("/pages/e11")
def example11():
svg = e11.example11()
writesvg( svg, "e11" )
return render_template("view1", filename="e11", title="Example 11", svg=svg, sdlink=4 )
@app.route("/pages/e12")
def example12():
svg = e12.example12()
writesvg( svg, "e12" )
return render_template("view1", filename="e12", title="Example 12", svg=svg )
@app.route("/pages/e13")
def example13():
svg = e13.example13()
writesvg( svg, "e13" )
return render_template("view1", filename="e13", title="Example 13", svg=svg )
@app.route("/pages/e14")
def example14():
svg = e14.example14()
writesvg( svg, "e14" )
return render_template("view1", filename="e14", title="Example 14", svg=svg )
@app.route("/pages/e15")
def example15():
svg = e15.example15()
writesvg( svg, "e15" )
return render_template("view1", filename="e15", title="Example 15", svg=svg )
@app.route("/pages/test_axis")
def test_axis():
svg = t_axis.test_axis()
return render_template("view1", filename="t_axis", title="Axis rendering test", svg=svg )
@app.route("/pages/test_units")
def test_units():
svg = t_units.test_units()
return render_template("view1", filename="t_units", title="SVG units", svg=svg )
@app.route("/pages/test_fluid_ff")
def test_fluid_ff():
svg = t_units_fluid.test_units_fluid( browser='firefox' )
return render_template("view1", filename="t_units_fluid", title="SVG units with fluid sizing (static firefox example)", svg=svg )
@app.route("/pages/test_fluid_wk")
def test_fluid_wk():
svg = t_units_fluid.test_units_fluid( browser='chrome' )
return render_template("view1", filename="t_units_fluid", title="SVG units with fluid sizing (static chrome / webkit example)", svg=svg )
@app.route("/pages/test_distribs")
def test_distribs():
outstr = t_distribs.test_distribs()
return render_template("view1", filename="t_distribs", title="Test of frequency distributions and percentiles computation", textresult=outstr )
@app.route("/pages/sampledata/<int:id>")
def sampledata( id ):
return render_template( "sampledata", id=id )
def writesvg( svgcode, basename ):
# refresh the static copy with latest...
pathname = "./static/examples/" + basename + ".svg"
try:
fp = open( pathname, "w" )
except IOError:
print( "ERROR: cannot open " + pathname + " for write" )
return False
print( '<?xml version="1.0" encoding="utf-8"?>', file=fp ) # these headers are necessary for full functionality, e.g. with special characters such as Δ
print( '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">', file=fp )
# HTML special chars (beyond a basic few) cause XML problems when the SVG is loaded via <img> ... remove them
print( svgcode.replace( "Δ", ""), file=fp )
fp.close()
# browser = request.user_agent.browser
# For a personal development instance, run: python application.py ... then visit http://bhmpd01.jax.org:5005
# note: to allow access across the network, flask requires host="0.0.0.0"
if __name__ == '__main__':
app.debug = True # turn on debugging for gitclone instance
app.run(host="0.0.0.0", port=5005)
| grubbcode/minplot | application.py | Python | mit | 7,912 | ["VisIt"] | 39c7fb6f75c5675039ff20198e9c54b274a7bd6bd1c1b0546f4d58bbb7be6c0d |
from __future__ import division
from iotbx.pdb.multimer_reconstruction import multimer
from scitbx.array_family import flex
from libtbx import group_args
import mmtbx.f_model
import scitbx.lbfgs
from iotbx import pdb
from cctbx import xray
import sys,os
class minimizer(object):
def __init__(self,
fmodel,
max_iterations=100,
n_ncs_mtrix=0,
sites = False,
u_iso = False):
self.fmodel = fmodel
self.fmodel.xray_structure.scatterers().flags_set_grads(state=False)
self.x_target_functor = self.fmodel.target_functor()
self.n_ncs_mtrix = n_ncs_mtrix
self.sites = sites
self.u_iso = u_iso
if(self.sites):
# Coordinates
self.x = self.fmodel.xray_structure.sites_cart().as_double()
if(self.u_iso):
# isotropic displacement factors (u_iso)
assert self.fmodel.xray_structure.scatterers().size() == \
self.fmodel.xray_structure.use_u_iso().count(True)
self.x = self.fmodel.xray_structure.extract_u_iso_or_u_equiv()
if(self.sites):
xray.set_scatterer_grad_flags(
scatterers = self.fmodel.xray_structure.scatterers(),
site = True)
if(self.u_iso):
sel = flex.bool(
self.fmodel.xray_structure.scatterers().size(), True).iselection()
self.fmodel.xray_structure.scatterers().flags_set_grad_u_iso(
iselection = sel)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self,
termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=max_iterations),
exception_handling_params=scitbx.lbfgs.exception_handling_parameters(
ignore_line_search_failed_rounding_errors=True,
ignore_line_search_failed_step_at_lower_bound=True,
ignore_line_search_failed_maxfev=True))
self.fmodel.xray_structure.tidy_us()
self.fmodel.xray_structure.apply_symmetry_sites()
self.fmodel.update_xray_structure(
xray_structure = self.fmodel.xray_structure,
update_f_calc = True)
def compute_functional_and_gradients(self):
"""() -> float,flex.double array
This method is called from:
phenix_sources\cctbx_project\scitbx\lbfgs\__init__.py
It returns the target_work and gradients to lbfgs
"""
print '='*80
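# Note: scitbx.lbfgs drives the minimization by repeatedly updating self.x (the
# flat parameter vector registered via target_evaluator=self) and calling
# compute_functional_and_gradients(), which must return the scalar functional
# together with a flat gradient array of the same length as self.x.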
if self.sites:
self.fmodel.xray_structure.set_sites_cart(
sites_cart = flex.vec3_double(self.x))
print len(flex.vec3_double(self.x))
if self.u_iso:
self.fmodel.xray_structure.set_u_iso(values = self.x)
self.fmodel.update_xray_structure(
xray_structure = self.fmodel.xray_structure,
update_f_calc = True)
tgx = self.x_target_functor(compute_gradients=True)
if self.sites:
tx = tgx.target_work()
gx = flex.vec3_double(
tgx.gradients_wrt_atomic_parameters(site=True).packed())
f = tx
g = gx
if self.u_iso:
tx = tgx.target_work()
gx = tgx.gradients_wrt_atomic_parameters(u_iso=True)
f = tx
g = gx
# When we have MTRIX records, use only the
# gradients of the first NCS copy
# ncs_end = len(g)//(self.n_ncs_mtrix+1)
# assert ncs_end*(self.n_ncs_mtrix+1)==len(g)
# g = g[:ncs_end]
return f, g.as_double()
def get_inputs(file_name):
'''
obj = get_inputs(pdb_file_name)
Returns:
--------
obj.pdb_hierarchy
obj.xray_structure
'''
pdb_inp = pdb.input(file_name=file_name)
return group_args(
pdb_hierarchy = pdb_inp.construct_hierarchy(),
xray_structure = pdb_inp.xray_structure_simple())
def run(file_to_refine,f_obs,r_free_flags,n_macro_cycle=10,r_work_target=0.0):
"""
Arguments:
---------
file_to_refine: a shaken copy of one NCS copy
used to produce the reference ASU
f_obs: observed structure factor amplitudes
r_free_flags: R-free-flags, flags for cross-validation data
n_macro_cycle: Number of refinement cycles
r_work_target: Refinement will stop when r_work <= r_work_target
"""
crystal_symmetry = f_obs.crystal_symmetry()
# Data to refine
m = multimer(
pdb_input_file_name=file_to_refine,
reconstruction_type='cau',error_handle=True,eps=1e-2)
# Keep the original crystal symmetry
xrs_poor = m.assembled_multimer.as_pdb_input().\
xray_structure_simple(crystal_symmetry=crystal_symmetry)
# get fmodel
params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
params.algorithm = "direct"
fmodel = mmtbx.f_model.manager(
f_obs = f_obs,
r_free_flags = r_free_flags,
xray_structure = xrs_poor,
sf_and_grads_accuracy_params = params,
target_name = "ls_wunit_kunit")
# refinement loop
print "start r_factor: %6.4f" % fmodel.r_work()
for macro_cycle in xrange(n_macro_cycle):
if(1):
# refine coordinates
minimized = minimizer(
fmodel = fmodel,
n_ncs_mtrix = m.number_of_transforms,
sites = True)
print " macro_cycle %3d (sites) r_factor: %6.4f"%(macro_cycle, fmodel.r_work())
if(0):
# refine ADPs (atomic displacement parameters)
minimized = minimizer(
fmodel = fmodel,
n_ncs_mtrix = m.number_of_transforms,
u_iso = True)
print " macro_cycle %3d (adp) r_factor: %6.4f"%(macro_cycle, fmodel.r_work())
if fmodel.r_work() <= r_work_target: break
if(1):
m.assembled_multimer.adopt_xray_structure(fmodel.xray_structure)
m.write(pdb_output_file_name="refined.pdb")
if __name__=='__main__':
osType = sys.platform
if osType.startswith('win'):
tempdir = r'C:\Phenix\Dev\Work\work\NCS\junk'
else:
tempdir = '/net/cci/youval/Work/work/NCS/junk'
os.chdir(tempdir)
# Test files
file_to_refine = 'ncs1_shaken.pdb'
file_reference = 'asu0.pdb'
# get xray_structure from reference PDB file
inp = get_inputs(file_name=file_reference)
xrs = inp.xray_structure
# simulate Fobs
f_obs = abs(xrs.structure_factors(d_min=1.0, algorithm="direct").f_calc())
r_free_flags = f_obs.generate_r_free_flags()
# Start refinement
run(file_to_refine=file_to_refine,
n_macro_cycle=2,
f_obs=f_obs,
r_free_flags=r_free_flags)
| youdar/work | work/NCS/refinement_strict_ncs.py | Python | mit | 6,121 | ["CRYSTAL"] | 6515ea602431d95eaa54770d5509f0634fba77be092f4e8ed1defdcecbe40a91 |
__author__ = "Just van den Broecke"
__date__ = "Dec 21, 2011 3:46:27 PM$"
"""
Name: BAGObject.py
Description: Classes for BAG objects
For each BAG object type (woonplaats, openbareruimte, nummeraanduiding,
ligplaats, standplaats, verblijfsobject, pand) there is a separate class.
The BAGObject classes are pure data classes. Functionality for reading
from and writing to the database is defined elsewhere.
These classes are a simplification of the original classes in libbagextract.py.
The BAG object type classes are derived from the base class BAGObject.
In this scheme a BAG object is a collection of BAG attributes, each with
its own properties.
Author: Just van den Broecke (original libbagextract.py by Matthijs van der Deijl)
Version: 1.0
- initial version
Date: 21 December 2011
OpenGeoGroep.nl
"""
from bagattribuut import BAGattribuut, BAGbooleanAttribuut, BAGdateAttribuut, BAGdatetimeAttribuut, BAGenumAttribuut, BAGenumRelatieAttribuut
from bagattribuut import BAGgeometrieValidatie, BAGintegerAttribuut, BAGmultiPolygoon, BAGnumeriekAttribuut, BAGpoint, BAGpolygoon, BAGrelatieAttribuut
from bagattribuut import BAGstringAttribuut
from etree import stripschema
# --------------------------------------------------------------------------------------------------------
# Class BAGObject
# Description  Base class for the 7 BAG object types. This class contains the generic attributes
#              that occur in all of these BAG object types.
# --------------------------------------------------------------------------------------------------------
class BAGObject:
# Constructor
def __init__(self, tag="", naam="", objectType=""):
self.attributen = {}
self.attributen_volgorde = []
self.voegToe(BAGstringAttribuut(16, "identificatie", "bag_LVC:identificatie"))
self.voegToe(BAGbooleanAttribuut("aanduidingRecordInactief", "bag_LVC:aanduidingRecordInactief"))
self.voegToe(BAGintegerAttribuut("aanduidingRecordCorrectie", "bag_LVC:aanduidingRecordCorrectie"))
self.voegToe(BAGbooleanAttribuut("officieel", "bag_LVC:officieel"))
self.voegToe(BAGbooleanAttribuut("inOnderzoek", "bag_LVC:inOnderzoek"))
self.voegToe(BAGdatetimeAttribuut("begindatumTijdvakGeldigheid", "bag_LVC:tijdvakgeldigheid/bagtype:begindatumTijdvakGeldigheid"))
self.voegToe(BAGdatetimeAttribuut("einddatumTijdvakGeldigheid", "bag_LVC:tijdvakgeldigheid/bagtype:einddatumTijdvakGeldigheid"))
self.voegToe(BAGstringAttribuut(20, "documentnummer", "bag_LVC:bron/bagtype:documentnummer"))
self.voegToe(BAGdateAttribuut("documentdatum", "bag_LVC:bron/bagtype:documentdatum"))
self.relaties = []
# Attributes used for mutation processing
self.origineelObj = None
self.verwerkings_id = None
self._tag = tag
self._naam = naam
self._objectType = objectType
# Add an attribute to this BAG object.
def voegToe(self, attribuut):
attribuut._parentObj = self
self.attributen[attribuut.naam()] = attribuut
self.attributen_volgorde.append(attribuut)
# Return the XML tag for this BAG object type.
def tag(self):
return self._tag
# Return the unique identification (number) of the BAG object.
def identificatie(self):
attr = self.attribuut('identificatie')
# Sanity check
if not attr:
return -1
return attr._waarde
# Return the name of this BAG object type.
def naam(self):
return self._naam
# Return a description of the object, consisting of the object type and the identification
def omschrijving(self):
return "%s %s" % (self.objectType(), self.identificatie())
# Return the object type code of this BAG object.
def objectType(self):
return self._objectType
# Indicate whether the object has a geometry.
# This method can be overridden in the derived classes
def heeftGeometrie(self):
return False
# Return the relation attributes, optionally filtered by relation name
def getRelaties(self, relatieNaam=None):
result = []
for relatie in self.relaties:
if not relatieNaam or (relatieNaam and relatieNaam == relatie.relatieNaam()):
result.append(relatie)
return result
# Initialize from XML
def leesUitXML(self, xml):
for attribuut in self.attributen_volgorde:
attribuut.leesUitXML(xml)
for relatie in self.relaties:
relatie.leesUitXML(xml)
# Initialize from an array/list of attribute values
def zetWaarden(self, value_list):
i = 0
for attribuut in self.attributen_volgorde:
attribuut.setWaarde(value_list[i])
i += 1
# Set the value of a named attribute
def zetWaarde(self, name, value):
attr = self.attribuut(name)
if attr:
attr.setWaarde(value)
# Build the SQL that selects the current occurrence of the object from the
# database, based on its identification
def maakSelectSQL(self):
sql = "SELECT "
for attribuut in self.attributen_volgorde:
if attribuut.naam() == 'geom_valid':
continue
naam = attribuut.naam()
if attribuut.isGeometrie():
naam = 'ST_AsText(ST_Force_2D(%s))' % naam
sql += naam + ", "
sql += " identificatie FROM " + self.naam() + "actueelbestaand"
sql += " WHERE identificatie = " + str(self.attribuut('identificatie').waarde())
return sql
# Return a boolean indicating whether the attribute exists
def heeftAttribuut(self, naam):
return naam in self.attributen
# Return the attribute with the given name
def attribuut(self, naam):
return self.attributen[naam]
# Print information about the object to the screen
def schrijf(self):
print("*** %s ***" % (self.naam()))
for attribuut in self.attributen_volgorde:
attribuut.schrijf()
for relatie in self.relaties:
relatie.schrijf()
# Generate SQL for a COPY.
def maakCopySQL(self, buffer):
velden = []
i = 0
for attribuut in self.attributen_volgorde:
velden.append(attribuut.naam())
w = attribuut.waardeSQL()
if not w:
# NULL value
w = r'\N'
# if attribuut.naam() == 'geom_valid':
# w = repr(False)
if i > 0:
# Column separator
buffer.write("~")
# print("~")
buffer.write(w)
# print(w)
i += 1
self.velden = velden
# End of record separator
buffer.write("\n")
# print("\n")
# Optional: relation objects
for relatie in self.relaties:
relatie.maakCopySQL()
# Generate SQL for an INSERT (as a prepared statement).
def maakInsertSQL(self):
velden = ""
waardes = ""
self.inhoud = []
for attribuut in self.attributen_volgorde:
if velden != "":
velden += ","
waardes += ","
velden += attribuut.naam()
waardes += attribuut.waardeSQLTpl()
self.inhoud.append(attribuut.waardeSQL())
self.sql = "INSERT INTO " + self.naam() + " (" + velden + ") VALUES (" + waardes + ")"
# Optional: relation objects
for relatie in self.relaties:
relatie.maakInsertSQL()
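# For illustration (assuming waardeSQLTpl() yields a "%s"-style placeholder), the
# statement generated for e.g. a woonplaats looks roughly like:
#   INSERT INTO woonplaats (identificatie, aanduidingRecordInactief, ..., geovlak)
#   VALUES (%s, %s, ..., %s)
# with self.inhoud holding the matching values for execution.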
# Generate SQL for an UPDATE (as a prepared statement).
def maakUpdateSQL(self):
nameVals = ""
self.inhoud = []
for attribuut in self.attributen_volgorde:
if nameVals != "":
nameVals += ","
nameVals += attribuut.naam() + ' = ' + attribuut.waardeSQLTpl()
self.inhoud.append(attribuut.waardeSQL())
# UPDATE weather SET temp_lo = temp_lo+1, temp_hi = temp_lo+15, prcp = DEFAULT
# WHERE city = 'San Francisco' AND date = '2003-07-03';
# The unique key is the combination (identificatie, aanduidingRecordInactief, aanduidingrecordcorrectie, begindatumTijdvakGeldigheid)
where = "WHERE identificatie = %s AND aanduidingrecordinactief = %s AND aanduidingrecordcorrectie = %s AND begindatumTijdvakGeldigheid = %s "
self.inhoud.extend((self.origineelObj.attribuut('identificatie').waardeSQL(),
self.origineelObj.attribuut('aanduidingRecordInactief').waardeSQL(),
self.origineelObj.attribuut('aanduidingRecordCorrectie').waardeSQL(),
self.origineelObj.attribuut('begindatumTijdvakGeldigheid').waardeSQL()))
self.sql = "UPDATE " + self.naam() + " SET " + nameVals + " " + where
# Optional: relation objects
for relatie in self.relaties:
relatie.maakUpdateSQL()
# Generate SQL for (DROP) and CREATE TABLE
def maakTabel(self):
sqlinit = ""
sql = "CREATE TABLE " + self.naam() + " (\n gid SERIAL,\n "
attributen = []
for attribuut in self.attributen_volgorde:
sqlinit += attribuut.sqlinit()
if attribuut.enkelvoudig() and not attribuut.isGeometrie():
attributen.append(attribuut.naam() + " " + attribuut.sqltype())
sql += ",\n ".join(attributen) + "\n)"
if self.heeftGeometrie():
sql += " WITH (OIDS=true);\n"
for attribuut in self.attributen_volgorde:
if attribuut.isGeometrie():
sql += "SELECT AddGeometryColumn('public', '%s', '%s', 28992, '%s', %s);\n" % \
(self.naam().lower(), attribuut.naam(), attribuut.soort(), attribuut.dimensie())
else:
sql += ";\n"
return "DROP TABLE IF EXISTS " + self.naam() + " CASCADE;\n" + sqlinit + sql + "\n"
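# For illustration, for the Woonplaats class below this yields DDL roughly like
# (column types come from each attribute's sqltype(); 28992 is the Dutch RD SRID):
#   DROP TABLE IF EXISTS woonplaats CASCADE;
#   CREATE TABLE woonplaats ( gid SERIAL, identificatie ... ) WITH (OIDS=true);
#   SELECT AddGeometryColumn('public', 'woonplaats', 'geovlak', 28992, 'MULTIPOLYGON', 2);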
# --------------------------------------------------------------------------------------------------------
# Class Woonplaats
# Derived from BAGObject
# Description  Class for the BAG object type Woonplaats.
# --------------------------------------------------------------------------------------------------------
class Woonplaats(BAGObject):
woonplaatsStatusTypes = ['Woonplaats aangewezen', 'Woonplaats ingetrokken']
def __init__(self):
BAGObject.__init__(self, "bag_LVC:Woonplaats", "woonplaats", "WPL")
self.voegToe(BAGstringAttribuut(80, "woonplaatsNaam", "bag_LVC:woonplaatsNaam"))
self.voegToe(BAGenumAttribuut(Woonplaats.woonplaatsStatusTypes, "woonplaatsStatus", "bag_LVC:woonplaatsStatus"))
self.voegToe(BAGmultiPolygoon(2, "geovlak", "bag_LVC:woonplaatsGeometrie"))
self.voegToe(BAGgeometrieValidatie("geom_valid", "geovlak"))
def heeftGeometrie(self):
return True
# --------------------------------------------------------------------------------------------------------
# Class OpenbareRuimte
# Derived from BAGObject
# Description  Class for the BAG object type OpenbareRuimte.
# --------------------------------------------------------------------------------------------------------
class OpenbareRuimte(BAGObject):
openbareRuimteTypes = ['Weg', 'Water', 'Spoorbaan', 'Terrein', 'Kunstwerk', 'Landschappelijk gebied', 'Administratief gebied']
openbareRuimteStatusTypes = ['Naamgeving uitgegeven', 'Naamgeving ingetrokken']
def __init__(self):
BAGObject.__init__(self, "bag_LVC:OpenbareRuimte", "openbareruimte", "OPR")
self.voegToe(BAGstringAttribuut(80, "openbareRuimteNaam", "bag_LVC:openbareRuimteNaam"))
self.voegToe(BAGenumAttribuut(OpenbareRuimte.openbareRuimteStatusTypes, "openbareRuimteStatus", "bag_LVC:openbareruimteStatus"))
self.voegToe(BAGenumAttribuut(OpenbareRuimte.openbareRuimteTypes, "openbareRuimteType", "bag_LVC:openbareRuimteType"))
self.voegToe(BAGstringAttribuut(16, "gerelateerdeWoonplaats",
"bag_LVC:gerelateerdeWoonplaats/bag_LVC:identificatie"))
self.voegToe(BAGattribuut(80, "verkorteOpenbareRuimteNaam",
"nen5825:VerkorteOpenbareruimteNaam"))
# --------------------------------------------------------------------------------------------------------
# Class Nummeraanduiding
# Derived from BAGObject
# Description  Class for the BAG object type Nummeraanduiding.
# --------------------------------------------------------------------------------------------------------
class Nummeraanduiding(BAGObject):
nummeraanduidingStatusTypes = ['Naamgeving uitgegeven', 'Naamgeving ingetrokken']
verblijfsobjectTypes = ['Verblijfsobject', 'Standplaats', 'Ligplaats']
def __init__(self):
BAGObject.__init__(self, "bag_LVC:Nummeraanduiding", "nummeraanduiding", "NUM")
self.voegToe(BAGnumeriekAttribuut(5, "huisnummer", "bag_LVC:huisnummer"))
self.voegToe(BAGattribuut(1, "huisletter", "bag_LVC:huisletter"))
self.voegToe(BAGattribuut(4, "huisnummertoevoeging", "bag_LVC:huisnummertoevoeging"))
self.voegToe(BAGattribuut(6, "postcode", "bag_LVC:postcode"))
self.voegToe(BAGenumAttribuut(Nummeraanduiding.nummeraanduidingStatusTypes, "nummeraanduidingStatus",
"bag_LVC:nummeraanduidingStatus"))
self.voegToe(BAGenumAttribuut(Nummeraanduiding.verblijfsobjectTypes, "typeAdresseerbaarObject",
"bag_LVC:typeAdresseerbaarObject"))
self.voegToe(BAGstringAttribuut(16, "gerelateerdeOpenbareRuimte",
"bag_LVC:gerelateerdeOpenbareRuimte/bag_LVC:identificatie"))
self.voegToe(BAGstringAttribuut(16, "gerelateerdeWoonplaats",
"bag_LVC:gerelateerdeWoonplaats/bag_LVC:identificatie"))
def getAdresseerbaarObject(self):
adresseerbaarObject = None
typeAdresseerbaarObject = self.attribuut('typeAdresseerbaarObject').waarde().lower()
if typeAdresseerbaarObject == "ligplaats":
adresseerbaarObject = Ligplaats()
elif typeAdresseerbaarObject == "standplaats":
adresseerbaarObject = Standplaats()
elif typeAdresseerbaarObject == "verblijfsobject":
adresseerbaarObject = Verblijfsobject()
return adresseerbaarObject
def maakSelectAdresseerbaarObjectSQL(self):
sql = "SELECT DISTINCT identificatie"
sql += " FROM " + self.attribuut('typeAdresseerbaarObject').waarde().lower() + "actueelbestaand"
sql += " WHERE hoofdadres = " + str(self.attribuut('identificatie').waarde())
return sql
# --------------------------------------------------------------------------------------------------------
# Class BAGadresseerbaarObject
# Derived from BAGObject
# Description  Base class for the addressable objects ligplaats, standplaats and verblijfsobject.
#              This class defines the main address (hoofdadres) and the secondary addresses (nevenadressen).
# --------------------------------------------------------------------------------------------------------
class BAGadresseerbaarObject(BAGObject):
def __init__(self, tag, naam, objectType):
BAGObject.__init__(self, tag, naam, objectType)
self.voegToe(BAGstringAttribuut(16, "hoofdadres",
"bag_LVC:gerelateerdeAdressen/bag_LVC:hoofdadres/bag_LVC:identificatie"))
self.relaties.append(BAGrelatieAttribuut(self, "adresseerbaarobjectnevenadres",
16, "nevenadres",
"bag_LVC:gerelateerdeAdressen/bag_LVC:nevenadres/bag_LVC:identificatie",
["ligplaatsStatus", "standplaatsStatus", "verblijfsobjectStatus", "geom_valid"]))
# --------------------------------------------------------------------------------------------------------
# Class Ligplaats
# Derived from BAGadresseerbaarObject
# Description  Class for the BAG object type Ligplaats.
# --------------------------------------------------------------------------------------------------------
class Ligplaats(BAGadresseerbaarObject):
ligplaatsStatusTypes = ['Plaats aangewezen', 'Plaats ingetrokken']
def __init__(self):
BAGadresseerbaarObject.__init__(self, "bag_LVC:Ligplaats", "ligplaats", "LIG")
self.voegToe(BAGenumAttribuut(Ligplaats.ligplaatsStatusTypes, "ligplaatsStatus", "bag_LVC:ligplaatsStatus"))
self.voegToe(BAGpolygoon(3, "geovlak", "bag_LVC:ligplaatsGeometrie"))
self.voegToe(BAGgeometrieValidatie("geom_valid", "geovlak"))
def heeftGeometrie(self):
return True
# --------------------------------------------------------------------------------------------------------
# Class Standplaats
# Derived from BAGadresseerbaarObject
# Description  Class for the BAG object type Standplaats.
# --------------------------------------------------------------------------------------------------------
class Standplaats(BAGadresseerbaarObject):
standplaatsStatusTypes = ['Plaats aangewezen', 'Plaats ingetrokken']
def __init__(self):
BAGadresseerbaarObject.__init__(self, "bag_LVC:Standplaats", "standplaats", "STA")
self.voegToe(BAGenumAttribuut(Standplaats.standplaatsStatusTypes, "standplaatsStatus", "bag_LVC:standplaatsStatus"))
self.voegToe(BAGpolygoon(3, "geovlak", "bag_LVC:standplaatsGeometrie"))
self.voegToe(BAGgeometrieValidatie("geom_valid", "geovlak"))
def heeftGeometrie(self):
return True
# --------------------------------------------------------------------------------------------------------
# Class Verblijfsobject
# Derived from BAGadresseerbaarObject
# Description  Class for the BAG object type Verblijfsobject.
# --------------------------------------------------------------------------------------------------------
class Verblijfsobject(BAGadresseerbaarObject):
statusEnum = ['Verblijfsobject gevormd',
'Niet gerealiseerd verblijfsobject',
'Verblijfsobject in gebruik (niet ingemeten)',
'Verblijfsobject in gebruik',
'Verblijfsobject ingetrokken',
'Verblijfsobject buiten gebruik']
gebruiksdoelEnum = ['woonfunctie', 'bijeenkomstfunctie',
'celfunctie', 'gezondheidszorgfunctie',
'industriefunctie', 'kantoorfunctie',
'logiesfunctie', 'onderwijsfunctie',
'sportfunctie', 'winkelfunctie', 'overige gebruiksfunctie']
def __init__(self):
BAGadresseerbaarObject.__init__(self, "bag_LVC:Verblijfsobject", "verblijfsobject", "VBO")
self.voegToe(BAGenumAttribuut(Verblijfsobject.statusEnum, "verblijfsobjectStatus",
"bag_LVC:verblijfsobjectStatus"))
self.voegToe(BAGnumeriekAttribuut(6, "oppervlakteVerblijfsobject",
"bag_LVC:oppervlakteVerblijfsobject"))
# The first related pand (in principle there can be more, see the relation)
# self.voegToe(BAGstringAttribuut(16, "gerelateerdPand1", "bag_LVC:gerelateerdPand/bag_LVC:identificatie"))
# The first gebruiksdoel (usage purpose; in principle there can be more, see the relation)
# self.voegToe(BAGnumeriekAttribuut(50, "gebruiksdoelVerblijfsobject1", "bag_LVC:gebruiksdoelVerblijfsobject"))
self.voegToe(BAGpoint(3, "geopunt", "bag_LVC:verblijfsobjectGeometrie"))
self.voegToe(BAGpolygoon(3, "geovlak", "bag_LVC:verblijfsobjectGeometrie"))
self.voegToe(BAGgeometrieValidatie("geom_valid", "geovlak"))
self.relaties.append(BAGenumRelatieAttribuut(self, "verblijfsobjectgebruiksdoel",
"gebruiksdoelVerblijfsobject",
"bag_LVC:gebruiksdoelVerblijfsobject",
["verblijfsobjectStatus", "geom_valid"],
Verblijfsobject.gebruiksdoelEnum))
self.relaties.append(BAGrelatieAttribuut(self, "verblijfsobjectpand",
16, "gerelateerdPand",
"bag_LVC:gerelateerdPand/bag_LVC:identificatie",
["verblijfsobjectStatus", "geom_valid"]))
def heeftGeometrie(self):
return True
# --------------------------------------------------------------------------------------------------------
# Class Pand
# Derived from BAGObject
# Description  Class for the BAG object type Pand.
# --------------------------------------------------------------------------------------------------------
class Pand(BAGObject):
statusEnum = ['Bouwvergunning verleend',
'Niet gerealiseerd pand',
'Bouw gestart',
'Pand in gebruik (niet ingemeten)',
'Pand in gebruik',
'Sloopvergunning verleend',
'Pand gesloopt',
'Pand buiten gebruik']
def __init__(self):
BAGObject.__init__(self, "bag_LVC:Pand", "pand", "PND")
self.voegToe(BAGenumAttribuut(Pand.statusEnum, "pandStatus", "bag_LVC:pandstatus"))
self.voegToe(BAGnumeriekAttribuut(4, "bouwjaar", "bag_LVC:bouwjaar"))
self.voegToe(BAGpolygoon(3, "geovlak", "bag_LVC:pandGeometrie"))
self.voegToe(BAGgeometrieValidatie("geom_valid", "geovlak"))
def heeftGeometrie(self):
return True
# --------------------------------------------------------------------------------------------------------
# Class GemeenteWoonplaatsRelatie
# Derived from BAGObject
# Description  Class for the mapping from Gemeente (municipality) code to Woonplaats code
# <gwr_LVC:GemeenteWoonplaatsRelatie>
# <gwr_LVC:tijdvakgeldigheid>
# <bagtype:begindatumTijdvakGeldigheid>2010100800000000</bagtype:begindatumTijdvakGeldigheid>
# </gwr_LVC:tijdvakgeldigheid>
# <gwr_LVC:gerelateerdeWoonplaats>
# <gwr_LVC:identificatie>2236</gwr_LVC:identificatie>
# </gwr_LVC:gerelateerdeWoonplaats>
# <gwr_LVC:gerelateerdeGemeente>
# <gwr_LVC:identificatie>0007</gwr_LVC:identificatie>
# </gwr_LVC:gerelateerdeGemeente>
# <gwr_LVC:status>definitief</gwr_LVC:status>
# </gwr_LVC:GemeenteWoonplaatsRelatie>
#
# --------------------------------------------------------------------------------------------------------
class GemeenteWoonplaatsRelatie(BAGObject):
statusEnum = ['voorlopig', 'definitief']
def __init__(self):
BAGObject.__init__(self, "gwr_LVC:GemeenteWoonplaatsRelatie", "gemeente_woonplaats", "GWR")
self.attributen = {}
self.attributen_volgorde = []
self.voegToe(BAGdatetimeAttribuut("begindatumtijdvakgeldigheid", "gwr_LVC:tijdvakgeldigheid/bagtype:begindatumTijdvakGeldigheid"))
self.voegToe(BAGdatetimeAttribuut("einddatumtijdvakgeldigheid", "gwr_LVC:tijdvakgeldigheid/bagtype:einddatumTijdvakGeldigheid"))
self.voegToe(BAGnumeriekAttribuut(4, "woonplaatscode", "gwr_LVC:gerelateerdeWoonplaats/gwr_LVC:identificatie"))
self.voegToe(BAGnumeriekAttribuut(4, "gemeentecode", "gwr_LVC:gerelateerdeGemeente/gwr_LVC:identificatie"))
self.voegToe(BAGenumAttribuut(GemeenteWoonplaatsRelatie.statusEnum, "status", "gwr_LVC:status"))
def heeftGeometrie(self):
return False
# An extremely simple Singleton Factory for BAGObjects
class BAGObjectFabriek:
# Singleton: static reference to the sole BAGObjectFabriek instance
bof = None
def __init__(self):
# Singleton: register this instance as the sole BAGObjectFabriek object
BAGObjectFabriek.bof = self
# --------------------------------------------------------------------------------------------------------
# Return a BAGObject of the correct class for the given object type.
# --------------------------------------------------------------------------------------------------------
def getBAGObjectBijType(self, objectType):
if objectType.upper() == "WPL":
return Woonplaats()
if objectType.upper() == "OPR":
return OpenbareRuimte()
if objectType.upper() == "NUM":
return Nummeraanduiding()
if objectType.upper() == "LIG":
return Ligplaats()
if objectType.upper() == "STA":
return Standplaats()
if objectType.upper() == "VBO":
return Verblijfsobject()
if objectType.upper() == "PND":
return Pand()
if objectType.upper() == "GWR":
return GemeenteWoonplaatsRelatie()
return None
# --------------------------------------------------------------------------------------------------------
# Return a BAGObject of the correct class for the given identification.
# The object type is derived from the identification.
# --------------------------------------------------------------------------------------------------------
def getBAGObjectBijIdentificatie(self, identificatie):
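# A full BAG identification is 16 digits: a 4-digit gemeente (municipality) code,
# a 2-digit object type code, and a 10-digit sequence number. The type codes
# matched below are: 01 = verblijfsobject, 02 = ligplaats, 03 = standplaats,
# 10 = pand, 20 = nummeraanduiding, 30 = openbare ruimte; a bare 4-digit code is
# a woonplaats. The slice id_str[3:5] reads the type code after int() has
# stripped the leading zero of the gemeente code.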
obj = None
id_int = int(identificatie)
id_str = str(id_int)
if len(id_str) == 4:
obj = Woonplaats()
elif id_str[3:5] == "30":
obj = OpenbareRuimte()
elif id_str[3:5] == "20":
obj = Nummeraanduiding()
elif id_str[3:5] == "02":
obj = Ligplaats()
elif id_str[3:5] == "03":
obj = Standplaats()
elif id_str[3:5] == "01":
obj = Verblijfsobject()
elif id_str[3:5] == "10":
obj = Pand()
if obj:
obj.attributen['identificatie'].setWaarde(identificatie)
return obj
# Create a BAGObject from a DOM node
def BAGObjectBijXML(self, node):
tag = stripschema(node.tag)
if tag == 'Ligplaats':
bagObject = Ligplaats()
elif tag == 'Woonplaats':
bagObject = Woonplaats()
elif tag == 'Verblijfsobject':
bagObject = Verblijfsobject()
elif tag == 'OpenbareRuimte':
bagObject = OpenbareRuimte()
elif tag == 'Nummeraanduiding':
bagObject = Nummeraanduiding()
elif tag == 'Standplaats':
bagObject = Standplaats()
elif tag == 'Pand':
bagObject = Pand()
elif tag == 'GemeenteWoonplaatsRelatie':
bagObject = GemeenteWoonplaatsRelatie()
else:
return
bagObject.leesUitXML(node)
return bagObject
# Create an array of BAGObjects from a DOM nodeList
def BAGObjectArrayBijXML(self, nodeList):
bagObjecten = []
for node in nodeList:
bagObject = self.BAGObjectBijXML(node)
if bagObject:
bagObjecten.append(bagObject)
return bagObjecten
# --------------------------------------------------------------------------------------------------------
# Class BAGRelatie
# Description  Relation from one BAG object to another BAG object
# --------------------------------------------------------------------------------------------------------
class BAGRelatie(BAGObject):
# Constructor
def __init__(self, tag="", naam="", objectType=""):
self.attributen = {}
self.attributen_volgorde = []
self.voegToe(BAGstringAttribuut(16, "identificatie", "bag_LVC:identificatie"))
self.voegToe(BAGbooleanAttribuut("aanduidingRecordInactief", "bag_LVC:aanduidingRecordInactief"))
self.voegToe(BAGintegerAttribuut("aanduidingRecordCorrectie", "bag_LVC:aanduidingRecordCorrectie"))
self.voegToe(BAGdatetimeAttribuut("begindatumTijdvakGeldigheid", "bag_LVC:tijdvakgeldigheid/bagtype:begindatumTijdvakGeldigheid"))
self.voegToe(BAGdatetimeAttribuut("einddatumTijdvakGeldigheid", "bag_LVC:tijdvakgeldigheid/bagtype:einddatumTijdvakGeldigheid"))
self.relaties = []
self.origineelObj = None
self._tag = tag
self._naam = naam
self._objectType = objectType
# --------------------------------------------------------------------------------------------------------
# Class VerblijfsObjectPand
# Description  Relation from Verblijfsobject to Pand
# --------------------------------------------------------------------------------------------------------
class VerblijfsObjectPand(BAGRelatie):
def __init__(self):
BAGRelatie.__init__(self, "", "verblijfsobjectpand", "")
self.voegToe(BAGstringAttribuut(16, "gerelateerdpand", "bag_LVC:gerelateerdPand"))
# --------------------------------------------------------------------------------------------------------
# Class AdresseerbaarObjectNevenAdres
# Description  Relation from Ligplaats, Standplaats or Verblijfsobject to a Nevenadres (secondary address)
# --------------------------------------------------------------------------------------------------------
class AdresseerbaarObjectNevenAdres(BAGRelatie):
def __init__(self):
BAGRelatie.__init__(self, "", "adresseerbaarobjectnevenadres", "")
self.voegToe(BAGstringAttribuut(16, "nevenadres", "bag_LVC:nevenadres"))
# --------------------------------------------------------------------------------------------------------
# Class VerblijfsObjectGebruiksdoel
# Description  Relation from Verblijfsobject to Gebruiksdoelen (usage purposes)
# --------------------------------------------------------------------------------------------------------
class VerblijfsObjectGebruiksdoel(BAGRelatie):
def __init__(self):
BAGRelatie.__init__(self, "", "verblijfsobjectgebruiksdoel", "")
self.voegToe(BAGenumAttribuut(Verblijfsobject.gebruiksdoelEnum, "gebruiksdoelverblijfsobject", "bag_LVC:gebruiksdoelVerblijfsobject"))
BAGObjectFabriek()
| opengeogroep/NLExtract | bag/src/bagobject.py | Python | gpl-3.0 | 30,138 | ["Elk"] | 73c41f5726b500a36eff20dfa99ddf4c721ca4cbb325d5c36640eb46d4a61292 |
"""Creates a 2D hyperspectrum consisting of two gaussians and plots it.
This example can serve as starting point to test other functionalities on the
simulated hyperspectrum.
"""
# NOTE: this example assumes HyperSpy's (pre-1.0) interactive namespace, where
# `signals`, `components`, `np` and `show` are already available. Run standalone,
# it would need imports along these lines (an assumption about this HyperSpy
# version's layout):
import numpy as np
from matplotlib.pyplot import show
from hyperspy import signals, components
# Create an empty spectrum
s = signals.Spectrum(np.zeros((32, 32, 1024)))
# Generate some simple data: two Gaussians with random centers and area
# First we create a model
m = s.create_model()
# Define the first gaussian
gs1 = components.Gaussian()
# Add it to the model
m.append(gs1)
# Set the parameters
gs1.sigma.value = 10
# Make the center vary in the -5,5 range around 256
gs1.centre.map['values'][:] = 256 + (np.random.random((32, 32)) - 0.5) * 10
gs1.centre.map['is_set'][:] = True
# Make the area vary between 0 and 10000
gs1.A.map['values'][:] = 10000 * np.random.random((32, 32))
gs1.A.map['is_set'][:] = True
# Second gaussian
gs2 = components.Gaussian()
# Add it to the model
m.append(gs2)
# Set the parameters
gs2.sigma.value = 20
# Make the center vary in the -10,10 range around 768
gs2.centre.map['values'][:] = 768 + (np.random.random((32, 32)) - 0.5) * 20
gs2.centre.map['is_set'][:] = True
# Make the area vary between 0 and 20000
gs2.A.map['values'][:] = 20000 * np.random.random((32, 32))
gs2.A.map['is_set'][:] = True
# Create the dataset
s_model = m.as_signal()
# Mark the signal as a simulation and add Poissonian noise
s_model.set_signal_origin("simulation")
s_model.add_poissonian_noise()
# Plot the result
s_model.plot()
show()
| sillvan/hyperspy | examples/simple_simulations/two_gaussians.py | Python | gpl-3.0 | 1,406 | ["Gaussian"] | 7bb63bcea112750faf640eac4cb45b7c93d18917f23fdd0aece92e896bb22f67 |
"""
ReportCLI class implementing a command line interface to the DIRAC Accounting
ReportGenerator service. It is not complete yet.
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import sys
import datetime
from DIRAC.Core.Base.CLI import CLI, colorize
from DIRAC.Core.Utilities import ExitCallback
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC import gLogger
class ReportCLI( CLI ):
def __init__( self ):
CLI.__init__( self )
self.do_connect( None )
ExitCallback.registerExitCallback( self.do_quit )
def start( self ):
"""
Start the command loop
"""
if not self.connected:
gLogger.error( "Client is not connected" )
try:
self.cmdloop()
except KeyboardInterrupt:
gLogger.warn( "Received a keyboard interrupt." )
self.do_quit( "" )
def do_connect( self, args ):
"""
Tries to connect to the server
Usage: connect
"""
gLogger.info( "Trying to connect to server" )
self.connected = False
self.prompt = "(%s)> " % colorize( "Not connected", "red" )
retVal = ReportsClient().pingService()
if retVal[ 'OK' ]:
self.prompt = "(%s)> " % colorize( "Connected", "green" )
self.connected = True
def printComment( self, comment ):
commentList = comment.split( "\n" )
for commentLine in commentList[ :-1 ]:
print("# %s" % commentLine.strip())
def showTraceback( self ):
import traceback
excType, excValue = sys.exc_info()[:2]  # avoid shadowing the type() builtin
print("________________________\n")
print("Exception", excType, ":", excValue)
traceback.print_tb( sys.exc_info()[2] )
print("________________________\n")
def __getDatetimeFromArg( self, dtString ):
if len( dtString ) != 12:
return False
dt = datetime.datetime( year = int( dtString[0:4] ),
month = int( dtString[4:6] ),
day = int( dtString[6:8] ) )
dt += datetime.timedelta( hours = int( dtString[ 8:10 ] ),
minutes = int( dtString[ 10:12 ] ) )
return dt
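# Note: __getDatetimeFromArg above expects a 12-character YYYYMMDDhhmm string,
# e.g. "201112210830" -> datetime.datetime(2011, 12, 21, 8, 30); any other
# length yields False.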
| petricm/DIRAC | AccountingSystem/Client/ReportCLI.py | Python | gpl-3.0 | 2,081 | ["DIRAC"] | a053dd9ddf3090d1e9bf7d440d33ea56fd50073be0eb1e83f51443d4c6efd574 |
from math import pi
from time import sleep
from euclid import Vector2
from imutils.video import VideoStream
from approxeng.holochassis.chassis import Motion
from approxeng.picamera import find_lines
from approxeng.viridia import IntervalCheck
from approxeng.viridia.task import Task
class LineFollowerTask(Task):
"""
Follow all the lines!
"""
def __init__(self, linear_speed=100, turn_speed=pi / 2, enable_drive=True, threshold=50, scan_region_height=20,
scan_region_position=0, scan_region_width_pad=0, min_detection_area=40, invert=True,
blur_kernel_size=9, physical_scan_width=140, physical_scan_distance=70, camera_resolution=128):
"""
Create a new line follower task
:param linear_speed:
The linear speed to move when we have a line in sight, mm/s, defaults to 100
:param turn_speed:
The angular speed to use when turning to locate the line, radians/second, defaults to pi/2
:param enable_drive:
True to enable drive operation, false to just show the lights, defaults to True
:param threshold:
The threshold used to convert to black and white after a gaussian blur is applied, defaults to 50
:param scan_region_height:
The height in pixels of the region to use, defaults to 20
:param scan_region_position:
The position of the region relative to the entire frame. 0 is at the top, 1.0 is as far towards the bottom
as it will go. Defaults to 0, scanning the top 'scan_region_height' pixels of the image
:param scan_region_width_pad:
The number of pixels to discard at either edge of the region, defaults to 0
:param min_detection_area:
The minimum area of detected moments, any feature below this size will be ignored. Defaults to 40 pixels
:param invert:
Boolean - set this to true if your pi camera is upside-down and you therefore want to have -1.0 at the right
hand edge of the image rather than the left, defaults to True for Viridia's camera mount
:param blur_kernel_size:
Size of the kernel used when applying the gaussian blur. Defaults to 9
:param physical_scan_width:
The width in mm of the region the camera is scanning for lines, used to map the line finder output to a
physical distance. Defaults to 140
:param physical_scan_distance:
The distance from the robot's centre of the scan region. This can be artificially shortened to create more
aggressive turning behaviour. Defaults to 70 even though the actual distance for Viridia is more like 150
:param camera_resolution:
The resolution of the square image frame used by the camera, defaults to 128 - we really don't need high
resolutions for this algorithm
"""
super(LineFollowerTask, self).__init__(task_name='Line follower')
self.stream = None
self.last_line_to_the_right = True
self.display_interval = IntervalCheck(interval=0.1)
self.linear_speed = linear_speed
self.enable_drive = enable_drive
self.turn_speed = turn_speed
self.threshold = threshold
self.scan_region_height = scan_region_height
self.scan_region_position = scan_region_position
self.scan_region_width_pad = scan_region_width_pad
self.min_detection_area = min_detection_area
self.invert = invert
self.blur_kernel_size = blur_kernel_size
self.physical_scan_width = physical_scan_width
self.physical_scan_distance = physical_scan_distance
self.camera_resolution = camera_resolution
def init_task(self, context):
"""
Create the video stream, which should activate the camera, and then pause for a couple of seconds
to let it gather its thoughts.
"""
# Set up lighting
context.feather.set_lighting_mode(2)
context.feather.set_direction(-2.0)
context.feather.set_ring_hue(0)
# Create stream and pause
self.stream = VideoStream(usePiCamera=True, resolution=(self.camera_resolution, self.camera_resolution)).start()
for i in range(0, 4):
# We really need to make sure the drive is enabled!
if self.enable_drive:
context.drive.enable_drive()
sleep(0.5)
context.feather.set_ring_hue(200)
# The camera is on the back of the robot, so set the front to be at PI radians
context.drive.front = pi
# Disable any motion limit we may have in action, it'll just confuse things
context.drive.set_motion_limit(None)
# Reset dead reckoning, we don't really use it but it'll save confusion later if this changes
context.drive.reset_dead_reckoning()
# Determine whether, if we lose the line, we should rotate clockwise (True) or counter-clockwise (False)
self.last_line_to_the_right = True
def poll_task(self, context, tick):
frame = self.stream.read()
lines = find_lines(image=frame, threshold=self.threshold, scan_region_height=self.scan_region_height,
scan_region_position=self.scan_region_position,
scan_region_width_pad=self.scan_region_width_pad, min_detection_area=self.min_detection_area,
invert=self.invert, blur_kernel_size=self.blur_kernel_size)
if self.enable_drive:
if len(lines) > 0:
"""
Found at least one line; pick the left-most. Lines are detected about 15 cm
ahead of the robot's centre, and the far left of the scan region is about 7 cm
to the left, so multiplying the x centroid of the first line segment by
physical_scan_width / 2 (70 with the defaults) gives the x coordinate of the
target in mm.
"""
target_x = lines[0] * self.physical_scan_width / 2
target_y = self.physical_scan_distance
context.drive.drive_at(x=target_x, y=target_y, speed=self.linear_speed, turn_speed=self.turn_speed)
self.last_line_to_the_right = target_x >= 0
else:
# Can't see a line, so rotate towards the side where we last saw one!
if self.last_line_to_the_right:
context.drive.set_motion(Motion(translation=Vector2(0, 0), rotation=self.turn_speed))
else:
context.drive.set_motion(Motion(translation=Vector2(0, 0), rotation=-self.turn_speed))
if self.display_interval.should_run():
if len(lines) > 0:
context.feather.set_direction(lines[0])
else:
context.feather.set_direction(-2)
def shutdown(self, context):
context.display.show('Disposing of streams')
context.drive.disable_drive()
context.drive.front = 0
if self.stream is not None:
self.stream.stop()
self.stream = None
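# A minimal usage sketch (assumption: Viridia's task framework constructs the
# task and supplies the context object from its own main loop, so nothing here
# is run by hand):
#   task = LineFollowerTask(linear_speed=120, enable_drive=False)
#   # the framework then calls task.init_task(context) once,
#   # task.poll_task(context, tick) on every tick, and task.shutdown(context) on exit.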
| ApproxEng/viridia | src/python/approxeng/viridia/tasks/camera.py | Python | apache-2.0 | 7,092 | ["Gaussian"] | e1f1c255ccbb5e2b85f1da9ababf04ee3694b39f2e4befc0ee3944848916a615 |
import sys
import antlr
class Visitor(antlr.ASTVisitor):
def __init__(self,*args):
super(Visitor,self).__init__(*args)
self.level = 0
if not args:
self.cout = sys.stdout
return
if isinstance(args[0],file):
self.cout = args[0]
return
assert 0
def tabs(self):
print " " * self.level
def printf(self,fmt,*args):
if not args:
sys.stdout.write(fmt)
return
argv = tuple(args)
self.cout.write(fmt % argv)
def flush(self):
self.cout.flush()
def visit1(self,node):
if not node:
self.printf(" nil ")
return
c = node.getType()
t = node.getText()
k = node.getFirstChild()
s = node.getNextSibling()
self.printf("( <%s> ",c)
if t:
self.printf(" %s ",t)
self.visit1(k)
self.visit1(s)
self.printf(")")
def visit(self,node):
self.visit1(node)
self.printf("\n")
class CalcAST(antlr.BaseAST):
def __init__(self,*args):
antlr.BaseAST.__init__(self)
class BinaryOperatorAST(CalcAST):
def __init__(self,*args):
CalcAST.__init__(self,*args)
def left(self):
return self.getFirstChild()
def right(self):
t = self.left()
if not t: return None
return t.getNextSibling()
def c2(self):
t = self.left()
if t: t = t.getNextSibling()
assert t
return t
### A simple node to represent PLUS operation
class PLUSNode(BinaryOperatorAST):
def __init__(self,*args):
BinaryOperatorAST.__init__(self,*args)
### Compute value of subtree; this is heterogeneous part :)
def value(self):
left = self.left()
assert left
r = self.c2()
assert r
return left.value() + r.value()
def toString(self):
return " +";
def __str__(self):
return self.toString()
def __repr__(self):
return str(self)
### A simple node to represent MULT operation
class MULTNode(BinaryOperatorAST):
def __init__(self,*args):
BinaryOperatorAST.__init__(self,*args)
# Compute value of subtree; this is heterogeneous part :)
def value(self):
return self.left().value() * self.c2().value()
def toString(self):
return " *";
def __str__(self):
return self.toString()
def __repr__(self):
return str(self)
### A simple node to represent an INT
class INTNode(CalcAST):
def __init__(self,*args):
CalcAST.__init__(self,*args)
self.v = 0
if args and isinstance(args[0],antlr.Token):
self.v = int(args[0].getText())
# Compute value of subtree; this is heterogeneous part :)
def value(self):
return self.v
def toString(self):
return " " + str(self.v)
def main():
import hetero_l
import hetero_p
L = hetero_l.Lexer()
P = hetero_p.Parser(L)
P.setFilename(L.getFilename())
### Parse the input expression
try:
P.expr()
except antlr.ANTLRException, ex:
print "*** error(s) while parsing."
print ">>> exit(1)"
import sys
sys.exit(1)
ast = P.getAST()
if not ast:
print "stop - no AST generated."
import sys
sys.exit(1)
###show tree
print "Tree: " + ast.toStringTree()
print "List: " + ast.toStringList()
print "Node: " + ast.toString()
print "visit>>"
visitor = Visitor()
visitor.visit(ast)
print "visit<<"
### compute value and return
r = ast.value()
print "value is", r
if __name__ == "__main__":
main()
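# Note: hetero_l and hetero_p are the lexer/parser modules that ANTLR 2 generates
# from this example's grammar file before the script can run (assumption: via an
# invocation along the lines of "java antlr.Tool <grammar>.g").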
| rmartinho/boo | lib/antlr-2.7.5/examples/python/heteroAST/hetero.py | Python | bsd-3-clause | 3,610 | ["VisIt"] | ef902228eace828d73dd43b85050bbf2cec10370270b63904c0c3b595681ce45 |
##########################################################################
# This script was generated by openmm-builder. To customize it further,
# you can save the file to disk and edit it with your favorite editor.
##########################################################################
from __future__ import print_function
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
from sys import stdout
print("[START] Application is now running")
pdb = app.PDBFile('input.pdb')
print("[STATUS] Loaded model")
forcefield = app.ForceField('amber03.xml', 'amber03_obc.xml')
print("[STATUS] Loaded force field")
system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.NoCutoff,
constraints=None, rigidWater=False)
print("[STATUS] Created system")
integrator = mm.LangevinIntegrator(300*unit.kelvin, 91/unit.picoseconds,
1.0*unit.femtoseconds)
print("[STATUS] Created integrator")
try:
platform = mm.Platform.getPlatformByName('CPU')
except Exception as e:
print("[ERROR] Could not load platform CPU. Running Reference")
platform = mm.Platform.getPlatformByName("Reference")
simulation = app.Simulation(pdb.topology, system, integrator, platform)
print("[STATUS] Set up compute platform")
simulation.context.setPositions(pdb.positions)
print("[STATUS] Set atomic positions")
print('[STATUS] Minimizing...')
simulation.minimizeEnergy()
print('[STATUS] Equilibrating...')
simulation.step(100)
simulation.reporters.append(app.DCDReporter('trajectory.dcd', 1000))
simulation.reporters.append(app.StateDataReporter(stdout, 1000, step=True,
potentialEnergy=True, totalEnergy=True, temperature=True, separator='\t'))
print("[STATUS] Set up reporters")
print('[STATUS] Running Production...')
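# 100,000 production steps at 1.0 fs/step come to ~100 ps of trajectory, with DCD
# frames and state data written every 1,000 steps (i.e. every 1 ps).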
increment = 1000
for i in range(0,100000,increment):
print("[STATUS] Step %s" % (i))
simulation.step(increment)
print('[END] Done!')
| machristie/airavata | sandbox/simstream/example/openmm_example/application/alanine_dipeptide.py | Python | apache-2.0 | 1,902 | ["OpenMM"] | 4cf54bdfb1442fce23813f4e3461e96ef0560c7b439837b3b7d8db34aa5d7502 |
from __future__ import absolute_import
from __future__ import print_function
import six
# {{{ logic that preserves NotImplemented
# (identity comparison: NotImplemented is a singleton and should not be compared with ==)
def not_ni(a):
if a is NotImplemented:
return NotImplemented
else:
return not a
def and_ni(a, b):
if a is NotImplemented or b is NotImplemented:
return NotImplemented
else:
return a and b
def or_ni(a, b):
if a is NotImplemented or b is NotImplemented:
return NotImplemented
else:
return a or b
# }}}
# {{{ query objects
class Query(object):
def __str__(self):
return self.visit(StringifyVisitor())
def __repr__(self):
return self.visit(ReprVisitor())
# deriving classes need only implement __lt__ and __eq__
def __ne__(self, other):
return not_ni(self.__eq__(other))
def __gt__(self, other):
return not_ni(self.__le__(other))
def __le__(self, other):
return or_ni(
self.__lt__(other),
self.__eq__(other))
def __ge__(self, other):
return not_ni(self.__lt__(other))
class IdQuery(Query):
def __init__(self, id):
self.id = id
def visit(self, visitor, *args):
return visitor.visit_id_query(self, *args)
def __eq__(self, other):
return isinstance(other, IdQuery) and self.id == other.id
def __lt__(self, other):
if not isinstance(other, IdQuery):
return type(self).__name__ < type(other).__name__
else:
return self.id < other.id
class TagQuery(Query):
def __init__(self, name):
self.name = name
def visit(self, visitor, *args):
return visitor.visit_tag_query(self, *args)
def __eq__(self, other):
return isinstance(other, TagQuery) and self.name == other.name
def __lt__(self, other):
if not isinstance(other, TagQuery):
return type(self).__name__ < type(other).__name__
else:
return self.name < other.name
class TagWildcardQuery(Query):
def __init__(self, name):
self.name = name
def visit(self, visitor, *args):
return visitor.visit_tag_wildcard_query(self, *args)
def __eq__(self, other):
return isinstance(other, TagWildcardQuery) and self.name == other.name
def __lt__(self, other):
if not isinstance(other, TagWildcardQuery):
return type(self).__name__ < type(other).__name__
else:
return self.name < other.name
class FulltextQuery(Query):
def __init__(self, substr):
self.substr = substr
def visit(self, visitor, *args):
return visitor.visit_fulltext_query(self, *args)
def __eq__(self, other):
return isinstance(other, FulltextQuery) and self.substr == other.substr
def __lt__(self, other):
if not isinstance(other, FulltextQuery):
return type(self).__name__ < type(other).__name__
else:
return self.substr < other.substr
class NotQuery(Query):
def __init__(self, child):
self.child = child
def visit(self, visitor, *args):
return visitor.visit_not_query(self, *args)
def __eq__(self, other):
return isinstance(other, NotQuery) and self.child == other.child
def __lt__(self, other):
if not isinstance(other, NotQuery):
return type(self).__name__ < type(other).__name__
else:
return self.child < other.child
class AndQuery(Query):
def __init__(self, children):
self.children = children[:]
self.children.sort()
def visit(self, visitor, *args):
return visitor.visit_and_query(self, *args)
def __eq__(self, other):
return isinstance(other, AndQuery) and self.children == other.children
def __lt__(self, other):
if not isinstance(other, AndQuery):
return type(self).__name__ < type(other).__name__
else:
return self.children < other.children
class OrQuery(Query):
def __init__(self, children):
self.children = children[:]
self.children.sort()
def visit(self, visitor, *args):
return visitor.visit_or_query(self, *args)
def __eq__(self, other):
return (isinstance(other, OrQuery)
and self.children == other.children)
def __lt__(self, other):
if not isinstance(other, OrQuery):
return type(self).__name__ < type(other).__name__
else:
return self.children < other.children
class DateQuery(Query):
def __init__(self, is_before, timestamp):
self.is_before = is_before
self.timestamp = timestamp
def visit(self, visitor, *args):
return visitor.visit_date_query(self, *args)
def __eq__(self, other):
return (
isinstance(other, DateQuery)
and self.is_before == other.is_before
and self.timestamp == other.timestamp)
def __lt__(self, other):
        if not isinstance(other, DateQuery):
return type(self).__name__ < type(other).__name__
else:
return ((self.is_before, self.timestamp)
<
(other.is_before, other.timestamp))
class StatelessQueryTerminal(Query):
def __eq__(self, other):
return isinstance(other, type(self))
def __lt__(self, other):
if not isinstance(other, type(self)):
return type(self).__name__ < type(other).__name__
else:
return False
class DatedQuery(StatelessQueryTerminal):
def visit(self, visitor, *args):
return visitor.visit_dated_query(self, *args)
class NoHideQuery(StatelessQueryTerminal):
def visit(self, visitor, *args):
return visitor.visit_no_hide_query(self, *args)
class SortByDateQuery(StatelessQueryTerminal):
def visit(self, visitor, *args):
return visitor.visit_sort_by_date(self, *args)
# }}}
# {{{ normalizing query constructors
def make_tag_query(tag):
if "?" in tag or "*" in tag:
return TagWildcardQuery(tag)
else:
return TagQuery(tag)
def make_not_query(child):
if isinstance(child, NotQuery):
return child.child
elif isinstance(child, AndQuery):
return OrQuery([make_not_query(subchild) for subchild in child.children])
elif isinstance(child, OrQuery):
return AndQuery([make_not_query(subchild) for subchild in child.children])
else:
return NotQuery(child)
def _make_flattened_children_query(klass, children):
new_children = []
for ch in children:
if isinstance(ch, klass):
new_children.extend(ch.children)
else:
new_children.append(ch)
return klass(new_children)
def make_and_query(children):
return _make_flattened_children_query(AndQuery, children)
def make_or_query(children):
return _make_flattened_children_query(OrQuery, children)
# }}}
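# Illustration (hypothetical queries, not part of the original module):
# make_tag_query("proj*") returns a TagWildcardQuery because of the "*", and
# make_not_query pushes negation inward De Morgan-style, so negating an
# AndQuery of two tags yields an OrQuery of NotQuery terms (children are kept
# in sorted order by the constructors above).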
# {{{ operator precedence
_PREC_OR = 10
_PREC_AND = 20
_PREC_NOT = 30
# }}}
# {{{ query visitors
class StringifyVisitor(object):
def visit_id_query(self, q, enclosing_prec=0):
return "id(%d)" % q.id
def visit_tag_query(self, q, enclosing_prec=0):
return q.name
def visit_tag_wildcard_query(self, q, enclosing_prec=0):
return q.name
def visit_fulltext_query(self, q, enclosing_prec=0):
return '"%s"' % q.substr
def visit_not_query(self, q, enclosing_prec=0):
if isinstance(q.child, TagQuery):
return '-%s' % q.child.name
else:
if enclosing_prec > _PREC_NOT:
return "(not %s)" % q.child.visit(self, _PREC_NOT)
else:
return "not %s" % q.child.visit(self, _PREC_NOT)
def visit_and_query(self, q, enclosing_prec=0):
me = " ".join(child.visit(self, _PREC_AND)
for child in q.children)
if enclosing_prec > _PREC_AND:
return "(%s)" % me
else:
return me
def visit_or_query(self, q, enclosing_prec=0):
me = " or ".join(child.visit(self, _PREC_OR)
for child in q.children)
if enclosing_prec > _PREC_OR:
return "(%s)" % me
else:
return me
def visit_date_query(self, q, enclosing_prec=0):
if q.is_before:
name = "before"
else:
name = "after"
from datetime import datetime
return "%s(%s)" % (name,
datetime.fromtimestamp(q.timestamp).strftime("%d %b %Y %T"))
def visit_dated_query(self, q, enclosing_prec=0):
return "dated"
def visit_no_hide_query(self, q, enclosing_prec=0):
return "nohide"
def visit_sort_by_date(self, q, enclosing_prec=0):
return "sortbydate"
class ReprVisitor(object):
    # IdQuery dispatches to visit_id_query; without this method, repr() of an
    # IdQuery would raise AttributeError.
    def visit_id_query(self, q):
        return "%s(%s)" % (type(q).__name__, repr(q.id))
    def visit_tag_query(self, q):
        return "%s(%s)" % (type(q).__name__, repr(q.name))
def visit_tag_wildcard_query(self, q):
return "%s(%s)" % (type(q).__name__, repr(q.name))
def visit_fulltext_query(self, q):
return "%s(%s)" % (type(q).__name__, repr(q.substr))
def visit_not_query(self, q):
return "%s(%s)" % (type(q).__name__, repr(q.child))
def visit_and_query(self, q):
return "%s(%s)" % (type(q).__name__, repr(q.children))
def visit_or_query(self, q):
return "%s(%s)" % (type(q).__name__, repr(q.children))
def visit_date_query(self, q):
return "%s(%s, %s)" % (q.__class__.__name__,
repr(q.is_before), repr(q.timestamp))
def visit_dated_query(self, q):
return "%s()" % type(q).__name__
visit_no_hide_query = visit_dated_query
visit_sort_by_date = visit_dated_query
class TagListVisitor(object):
def visit_id_query(self, q):
return []
def visit_tag_query(self, q):
return [q.name]
def visit_tag_wildcard_query(self, q):
return [q.name]
def visit_fulltext_query(self, q):
return []
def visit_not_query(self, q):
return q.child.visit(self)
def visit_and_query(self, q):
result = []
for ch in q.children:
result += ch.visit(self)
return result
def visit_or_query(self, q):
result = []
for ch in q.children:
result += ch.visit(self)
return result
def visit_date_query(self, q):
return []
visit_dated_query = visit_date_query
visit_no_hide_query = visit_date_query
visit_sort_by_date = visit_date_query
# }}}
# {{{ lexer data
from six.moves import intern  # plain intern() is gone in Python 3
_and = intern("and")
_or = intern("or")
_not = intern("not")
_openpar = intern("openpar")
_closepar = intern("closepar")
_id = intern("id")
_before = intern("before")
_after = intern("after")
_dated = intern("dated")
_nohide = intern("nohide")
_sortbydate = intern("sortbydate")
_tag = intern("tag")
_negtag = intern("negtag")
_fulltext = intern("fulltext")
_whitespace = intern("whitespace")
from pytools.lex import RE
_LEX_TABLE = [
(_and, RE(r"and\b")),
(_or, RE(r"or\b")),
(_not, RE(r"not\b")),
(_openpar, RE(r"\(")),
(_closepar, RE(r"\)")),
(_id, RE(r"id\(([0-9]+)\)")),
(_before, RE(r"before\(([-:, A-Za-z0-9]+)\)")),
(_after, RE(r"after\(([-:, A-Za-z0-9]+)\)")),
(_dated, RE(r"dated\b")),
(_nohide, RE(r"nohide\b")),
(_sortbydate, RE(r"sortbydate\b")),
(_tag, RE(r"[.\w?*]+")),
(_negtag, RE(r"-[.\w?*]+")),
(_fulltext, RE(r'".*?(?!\\\\)"')),
(_whitespace, RE("[ \t]+")),
]
_STATELESS_TERMINALS = {
_dated: DatedQuery(),
_nohide: NoHideQuery(),
_sortbydate: SortByDateQuery()
}
_TERMINALS = (
[_tag, _negtag, _fulltext, _id, _before, _after]
+ list(six.iterkeys(_STATELESS_TERMINALS)))
# }}}
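# Rough sketch of how the table above tokenizes, assuming the lexer tries the
# patterns in table order and whitespace tokens are filtered out (as
# parse_query below does):
#   'pic -draft "cat herding" or before(jan 1)'
# becomes roughly
#   _tag("pic"), _negtag("-draft"), _fulltext('"cat herding"'),
#   _or, _before with "jan 1" in its capture group.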
# {{{ parser
def parse_query(expr_str):
def parse_terminal(pstate):
next_tag = pstate.next_tag()
if next_tag is _tag:
return make_tag_query(pstate.next_str_and_advance())
elif next_tag is _negtag:
return NotQuery(make_tag_query(pstate.next_str_and_advance()[1:]))
elif next_tag is _fulltext:
return FulltextQuery(pstate.next_str_and_advance()[1:-1])
elif next_tag is _dated:
pstate.advance()
return DatedQuery()
elif next_tag in _STATELESS_TERMINALS:
pstate.advance()
return _STATELESS_TERMINALS[next_tag]
elif next_tag in [_id]:
result = IdQuery(int(pstate.next_match_obj().group(1)))
pstate.advance()
return result
elif next_tag in [_before, _after]:
from parsedatetime.parsedatetime import Calendar
cal = Calendar()
timetup = cal.parse(pstate.next_match_obj().group(1))
pstate.advance()
import time
return DateQuery(next_tag == _before, time.mktime(timetup[0]))
else:
pstate.expected("terminal")
def inner_parse(pstate, min_precedence=0):
pstate.expect_not_end()
if pstate.is_next(_not):
pstate.advance()
left_query = make_not_query(inner_parse(pstate, _PREC_NOT))
elif pstate.is_next(_openpar):
pstate.advance()
left_query = inner_parse(pstate)
pstate.expect(_closepar)
pstate.advance()
else:
left_query = parse_terminal(pstate)
did_something = True
while did_something:
did_something = False
if pstate.is_at_end():
return left_query
next_tag = pstate.next_tag()
if next_tag is _and and _PREC_AND > min_precedence:
pstate.advance()
left_query = make_and_query(
[left_query, inner_parse(pstate, _PREC_AND)])
did_something = True
elif next_tag is _or and _PREC_OR > min_precedence:
pstate.advance()
left_query = make_or_query(
[left_query, inner_parse(pstate, _PREC_OR)])
did_something = True
elif (next_tag in _TERMINALS + [_not, _openpar]
and _PREC_AND > min_precedence):
left_query = make_and_query(
[left_query, inner_parse(pstate, _PREC_AND)])
did_something = True
return left_query
from pytools.lex import LexIterator, lex
pstate = LexIterator(
[(tag, s, idx, matchobj)
for (tag, s, idx, matchobj) in lex(
_LEX_TABLE, expr_str, match_objects=True)
if tag is not _whitespace], expr_str)
if pstate.is_at_end():
return TagQuery(u"home")
result = inner_parse(pstate)
if not pstate.is_at_end():
pstate.raise_parse_error("leftover input after completed parse")
return result
# }}}
# {{{ 'test'
if __name__ == "__main__":
v = parse_query('not (yuck "yy!" and (not (not them and (yes or me)) and you))')
print(v)
v2 = parse_query(str(v))
print(v2)
v3 = parse_query(str(v2))
print(v3)
print(parse_query('yuck bluck'))
print(parse_query(''))
v = parse_query('not before(yesterday 5 am)')
print(v)
v2 = parse_query(str(v))
print(v2)
v = parse_query('pic ("test" or "validation")')
print(repr(v))
# }}}
# vim: foldmethod=marker
|
inducer/synoptic
|
synoptic/query.py
|
Python
|
mit
| 15,386
|
[
"VisIt"
] |
5117a09965247d1607db36b5450322fdb888fde7713119076a4afa1616e42252
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the Account Settings page.
"""
from unittest import skip
from nose.plugins.attrib import attr
from bok_choy.web_app_test import WebAppTest
from bok_choy.page_object import XSS_INJECTION
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class AccountSettingsTestMixin(EventsTestMixin, WebAppTest):
"""
Mixin with helper methods to test the account settings page.
"""
CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated"
USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed'
ACCOUNT_SETTINGS_REFERER = u"/account/settings"
def visit_account_settings_page(self):
"""
Visit the account settings page for the current user, and store the page instance
as self.account_settings_page.
"""
# pylint: disable=attribute-defined-outside-init
self.account_settings_page = AccountSettingsPage(self.browser)
self.account_settings_page.visit()
self.account_settings_page.wait_for_ajax()
def log_in_as_unique_user(self, email=None, full_name=None):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username, email=email, full_name=full_name).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def settings_changed_event_filter(self, event):
"""Filter out any events that are not "settings changed" events."""
return event['event_type'] == self.USER_SETTINGS_CHANGED_EVENT_NAME
def expected_settings_changed_event(self, setting, old, new, table=None):
"""A dictionary representing the expected fields in a "settings changed" event."""
return {
'username': self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': self.user_id,
'setting': setting,
'old': old,
'new': new,
'truncated': [],
'table': table or 'auth_userprofile'
}
}
def settings_change_initiated_event_filter(self, event):
"""Filter out any events that are not "settings change initiated" events."""
return event['event_type'] == self.CHANGE_INITIATED_EVENT_NAME
def expected_settings_change_initiated_event(self, setting, old, new, username=None, user_id=None):
"""A dictionary representing the expected fields in a "settings change initiated" event."""
return {
'username': username or self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': user_id or self.user_id,
'setting': setting,
'old': old,
'new': new,
}
}
def get_settings_page_url(self):
"""The absolute URL of the account settings page given the test context."""
return self.relative_path_to_absolute_uri(self.ACCOUNT_SETTINGS_REFERER)
def assert_no_setting_changed_event(self):
"""Assert no setting changed event has been emitted thus far."""
self.assert_no_matching_events_were_emitted({'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME})
@attr('shard_8')
class DashboardMenuTest(AccountSettingsTestMixin, WebAppTest):
"""
Tests that the dashboard menu works correctly with the account settings page.
"""
def test_link_on_dashboard_works(self):
"""
Scenario: Verify that the "Account" link works from the dashboard.
Given that I am a registered user
And I visit my dashboard
And I click on "Account" in the top drop down
Then I should see my account settings page
"""
self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Account', dashboard_page.username_dropdown_link_text)
dashboard_page.click_account_settings_link()
@attr('shard_8')
class AccountSettingsPageTest(AccountSettingsTestMixin, WebAppTest):
"""
Tests that verify behaviour of the Account Settings page.
"""
SUCCESS_MESSAGE = 'Your changes have been saved.'
def setUp(self):
"""
Initialize account and pages.
"""
super(AccountSettingsPageTest, self).setUp()
self.full_name = XSS_INJECTION
self.username, self.user_id = self.log_in_as_unique_user(full_name=self.full_name)
self.visit_account_settings_page()
def test_page_view_event(self):
"""
Scenario: An event should be recorded when the "Account Settings"
page is viewed.
Given that I am a registered user
And I visit my account settings page
Then a page view analytics event should be recorded
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.user.settings.viewed'}, number_of_matches=1)
self.assert_events_match(
[
{
'event': {
'user_id': self.user_id,
'page': 'account',
'visibility': None
}
}
],
actual_events
)
def test_all_sections_and_fields_are_present(self):
"""
Scenario: Verify that all sections and fields are present on the page.
"""
expected_sections_structure = [
{
'title': 'Basic Account Information',
'fields': [
'Username',
'Full Name',
'Email Address',
'Password',
'Language',
'Country or Region'
]
},
{
'title': 'Additional Information',
'fields': [
'Education Completed',
'Gender',
'Year of Birth',
'Preferred Language',
]
}
]
self.assertEqual(self.account_settings_page.sections_structure(), expected_sections_structure)
def _test_readonly_field(self, field_id, title, value):
"""
        Test behaviour of a readonly field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_readonly_field(field_id), value)
def _test_text_field(
self, field_id, title, initial_value, new_invalid_value, new_valid_values, success_message=SUCCESS_MESSAGE,
assert_after_reload=True
):
"""
Test behaviour of a text field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), initial_value)
self.assertEqual(
self.account_settings_page.value_for_text_field(field_id, new_invalid_value), new_invalid_value
)
self.account_settings_page.wait_for_indicator(field_id, 'validation-error')
self.browser.refresh()
self.assertNotEqual(self.account_settings_page.value_for_text_field(field_id), new_invalid_value)
for new_value in new_valid_values:
self.assertEqual(self.account_settings_page.value_for_text_field(field_id, new_value), new_value)
self.account_settings_page.wait_for_message(field_id, success_message)
if assert_after_reload:
self.browser.refresh()
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), new_value)
def _test_dropdown_field(
self, field_id, title, initial_value, new_values, success_message=SUCCESS_MESSAGE, reloads_on_save=False
):
"""
Test behaviour of a dropdown field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id), initial_value)
for new_value in new_values:
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id, new_value), new_value)
# An XHR request is made when changing the field
self.account_settings_page.wait_for_ajax()
if reloads_on_save:
self.account_settings_page.wait_for_loading_indicator()
else:
self.browser.refresh()
self.account_settings_page.wait_for_page()
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id), new_value)
def _test_link_field(self, field_id, title, link_title, field_type, success_message):
"""
        Test behaviour of a link field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
self.account_settings_page.click_on_link_in_link_field(field_id, field_type=field_type)
self.account_settings_page.wait_for_message(field_id, success_message)
def test_username_field(self):
"""
Test behaviour of "Username" field.
"""
self._test_readonly_field('username', 'Username', self.username)
def test_full_name_field(self):
"""
Test behaviour of "Full Name" field.
"""
self._test_text_field(
u'name',
u'Full Name',
self.full_name,
u'@',
[u'another name', self.full_name],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('name', self.full_name, 'another name'),
self.expected_settings_changed_event('name', 'another name', self.full_name),
],
actual_events
)
def test_email_field(self):
"""
Test behaviour of "Email" field.
"""
email = u"test@example.com"
username, user_id = self.log_in_as_unique_user(email=email)
self.visit_account_settings_page()
self._test_text_field(
u'email',
u'Email Address',
email,
u'test@example.com' + XSS_INJECTION,
[u'me@here.com', u'you@there.com'],
success_message='Click the link in the message to update your email address.',
assert_after_reload=False
)
actual_events = self.wait_for_events(
event_filter=self.settings_change_initiated_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_change_initiated_event(
'email', email, 'me@here.com', username=username, user_id=user_id),
# NOTE the first email change was never confirmed, so old has not changed.
self.expected_settings_change_initiated_event(
'email', email, 'you@there.com', username=username, user_id=user_id),
],
actual_events
)
# Email is not saved until user confirms, so no events should have been
# emitted.
self.assert_no_setting_changed_event()
def test_password_field(self):
"""
Test behaviour of "Password" field.
"""
self._test_link_field(
u'password',
u'Password',
u'Reset Your Password',
u'button',
success_message='Click the link in the message to reset your password.',
)
event_filter = self.expected_settings_change_initiated_event('password', None, None)
self.wait_for_events(event_filter=event_filter, number_of_matches=1)
# Like email, since the user has not confirmed their password change,
# the field has not yet changed, so no events will have been emitted.
self.assert_no_setting_changed_event()
@skip(
        'On bokchoy test servers, language changes take a few reloads to fully '
        'take effect, which means we can no longer reliably match the strings '
        'in the HTML in other tests.'
)
def test_language_field(self):
"""
Test behaviour of "Language" field.
"""
self._test_dropdown_field(
u'pref-lang',
u'Language',
u'English',
[u'Dummy Language (Esperanto)', u'English'],
reloads_on_save=True,
)
def test_education_completed_field(self):
"""
Test behaviour of "Education Completed" field.
"""
self._test_dropdown_field(
u'level_of_education',
u'Education Completed',
u'',
[u'Bachelor\'s degree', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('level_of_education', None, 'b'),
self.expected_settings_changed_event('level_of_education', 'b', None),
],
actual_events
)
def test_gender_field(self):
"""
Test behaviour of "Gender" field.
"""
self._test_dropdown_field(
u'gender',
u'Gender',
u'',
[u'Female', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('gender', None, 'f'),
self.expected_settings_changed_event('gender', 'f', None),
],
actual_events
)
def test_year_of_birth_field(self):
"""
Test behaviour of "Year of Birth" field.
"""
# Note that when we clear the year_of_birth here we're firing an event.
self.assertEqual(self.account_settings_page.value_for_dropdown_field('year_of_birth', ''), '')
expected_events = [
self.expected_settings_changed_event('year_of_birth', None, 1980),
self.expected_settings_changed_event('year_of_birth', 1980, None),
]
with self.assert_events_match_during(self.settings_changed_event_filter, expected_events):
self._test_dropdown_field(
u'year_of_birth',
u'Year of Birth',
u'',
[u'1980', u''],
)
def test_country_field(self):
"""
Test behaviour of "Country or Region" field.
"""
self._test_dropdown_field(
u'country',
u'Country or Region',
u'',
[u'Pakistan', u'Palau'],
)
def test_preferred_language_field(self):
"""
Test behaviour of "Preferred Language" field.
"""
self._test_dropdown_field(
u'language_proficiencies',
u'Preferred Language',
u'',
[u'Pushto', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event(
'language_proficiencies', [], [{'code': 'ps'}], table='student_languageproficiency'),
self.expected_settings_changed_event(
'language_proficiencies', [{'code': 'ps'}], [], table='student_languageproficiency'),
],
actual_events
)
def test_linked_accounts(self):
"""
Test that fields for third party auth providers exist.
Currently there is no way to test the whole authentication process
because that would require accounts with the providers.
"""
providers = (
['auth-oa2-facebook', 'Facebook', 'Link Your Account'],
['auth-oa2-google-oauth2', 'Google', 'Link Your Account'],
)
# switch to "Linked Accounts" tab
self.account_settings_page.switch_account_settings_tabs('accounts-tab')
for field_id, title, link_title in providers:
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
def test_order_history(self):
"""
Test that we can see orders on Order History tab.
"""
# switch to "Order History" tab
self.account_settings_page.switch_account_settings_tabs('orders-tab')
# verify that we are on correct tab
self.assertTrue(self.account_settings_page.is_order_history_tab_visible)
expected_order_data = {
'title': 'Test Course',
'date': 'Date Placed:\nApr 21, 2016',
'price': 'Cost:\n$100.0',
'number': 'Order Number:\nEdx-123'
}
for field_name, value in expected_order_data.iteritems():
self.assertEqual(
self.account_settings_page.get_value_of_order_history_row_item('order-Edx-123', field_name), value
)
self.assertTrue(self.account_settings_page.order_button_is_visible('order-Edx-123'))
@attr('a11y')
class AccountSettingsA11yTest(AccountSettingsTestMixin, WebAppTest):
"""
Class to test account settings accessibility.
"""
def test_account_settings_a11y(self):
"""
Test the accessibility of the account settings page.
"""
self.log_in_as_unique_user()
self.visit_account_settings_page()
self.account_settings_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'link-href', # TODO: AC-233
],
})
self.account_settings_page.a11y_audit.check_for_accessibility_errors()
|
waheedahmed/edx-platform
|
common/test/acceptance/tests/lms/test_account_settings.py
|
Python
|
agpl-3.0
| 18,554
|
[
"VisIt"
] |
1ad4d64495e0118b0e96b9de58d9c34cea2f4ec127d20dc1a86d82df8124ecc2
|
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
wget http://people.duke.edu/~ccc14/pcfb/_downloads/ls_orchid.fasta
wget ftp://ftp.expasy.org/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz
"""
from Bio import Entrez
from Bio import Medline
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.Data import CodonTable
from Bio.Seq import Seq
from Bio.SeqUtils import GC
import os.path
DATAROOT = "/media/d/soft/gene"
def simple():
my_seq = Seq("AGTACACTGGT")
print my_seq.complement()
print my_seq.reverse_complement()
def simple2():
my_seq = Seq("AGTACACTGGT", IUPAC.unambiguous_dna)
print my_seq.alphabet
print my_seq[4:12]
print my_seq[::-1]
print my_seq.count("A")
print GC(my_seq)
print my_seq.transcribe()
print my_seq.translate()
def translate():
messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG", IUPAC.unambiguous_rna)
print messenger_rna.translate()
def fasta():
filepath = os.path.join(DATAROOT, "ls_orchid.fasta")
for seq_record in SeqIO.parse(filepath, "fasta"):
print seq_record.id
print repr(seq_record.seq)
print len(seq_record)
def codon_table():
standard_table = CodonTable.unambiguous_dna_by_id[1]
mito_table = CodonTable.unambiguous_dna_by_id[2]
print standard_table
print mito_table
def entrez():
"""
IMPORTANT NOTE:
http://people.duke.edu/~ccc14/pcfb/biopython/BiopythonEntrez.html
"""
Entrez.email = "adam.richards@stat.duke.edu"
Entrez.tool = "MyLocalScript"
# What databases do I have access to
handle = Entrez.einfo()
record = Entrez.read(handle)
print record["DbList"]
# What if I want info about a database
handle = Entrez.einfo(db="pubmed")
record = Entrez.read(handle)
print record["DbInfo"]["Description"]
print record["DbInfo"]["Count"]
print record["DbInfo"]["LastUpdate"]
for field in record["DbInfo"]["FieldList"]:
print "%(Name)s, %(FullName)s, %(Description)s" % field
# How do I search a db for a given term
handle = Entrez.esearch(db="pubmed", term="biopython")
record = Entrez.read(handle)
print record["IdList"]
# More sample
handle = Entrez.esearch(db="nucleotide", term="Cypripedioideae[Orgn] AND matK[Gene]")
record = Entrez.read(handle)
print record["Count"]
print record["IdList"]
# Get all journals that have `computational` as a term
handle = Entrez.esearch(db="journals", term="computational")
record = Entrez.read(handle)
print record["Count"]
print record["IdList"]
# I have a term now I want the item itself
handle = Entrez.efetch(db="nucleotide", id="186972394", rettype="gb", retmode="text")
record = SeqIO.read(handle, "genbank")
handle.close()
print record
handle = Entrez.efetch(db="pubmed", id="21210977")
print handle.read()
def efetch():
Entrez.email = "A.N.Other@example.com"
handle = Entrez.efetch(db="nucleotide", id="186972394", rettype="gb", retmode="text")
print handle.read()
def pubmed():
# Get the count of papers about orchid only in database pubmed
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.egquery(term="orchid")
record = Entrez.read(handle)
for row in record["eGQueryResult"]:
if row["DbName"] == "pubmed":
print "The count of papers about orchid in database pubmed:", row["Count"]
# Get the list of ids of above
handle = Entrez.esearch(db="pubmed", term="orchid", retmax=100)
record = Entrez.read(handle)
idlist = record["IdList"]
print "The id list of papers about orchid in database pubmed:", idlist
print
# Search papers author by "Liu ZJ" from pubmed
handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline",
retmode="text")
records = Medline.parse(handle)
search_author = "Liu ZJ"
for record in records:
if "AU" not in record:
continue
if search_author in record["AU"]:
print "Author %s found." % search_author
print "title:", record.get("TI", "?")
print "authors:", record.get("AU", "?")
print "source:", record.get("SO", "?")
print
def genbank():
"""
检索Opuntia (多刺的梨型仙人掌)获得所有满足要求的GIs(GenBank标志符)
"""
    # First, see how many records there are
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.egquery(term="Opuntia AND rpl16")
record = Entrez.read(handle)
for row in record["eGQueryResult"]:
if row["DbName"] == "nuccore":
print row["Count"]
    # Download the list of GenBank identifiers
handle = Entrez.esearch(db="nuccore", term="Opuntia AND rpl16")
record = Entrez.read(handle)
gi_list = record["IdList"]
    # Use these GIs to download the GenBank records
handle = Entrez.efetch(db="nuccore", id=gi_list, rettype="gb", retmode="text")
    # Look at the raw GenBank file
text = handle.read()
print text
    # # Convert the GenBank data into SeqRecord objects (including SeqFeature
    # # objects) and inspect each record for the information we care about
# records = SeqIO.parse(handle, "gb")
# for record in records:
# print "%s, length %i, with %i features" \
# % (record.name, len(record), len(record.features))
def lineage():
"""
查看物种的谱系关系
找出Cyripedioideae兰花家族的谱系
"""
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.esearch(db="Taxonomy", term="Cypripedioideae")
record = Entrez.read(handle)
handle = Entrez.efetch(db="Taxonomy", id=record["IdList"][0], retmode="xml")
records = Entrez.read(handle)
    # Again, this record holds a lot of information: records[0].keys()
    # We can read the lineage directly from the record:
    print records[0]["Lineage"]
    # The record contains far more than is shown here; for example, look at
    # "LineageEx" instead of "Lineage" to get the NCBI taxon IDs in the lineage.
def history_seq():
"""
利用 history 来搜索和下载序列
"""
Entrez.email = "history.user@example.com"
search_handle = Entrez.esearch(db="nucleotide", term="Opuntia[orgn] and rpl16",
usehistory="y")
search_results = Entrez.read(search_handle)
search_handle.close()
gi_list = search_results["IdList"]
count = int(search_results["Count"])
assert count == len(gi_list)
    # Two extra values come back: the WebEnv session cookie and the QueryKey
webenv = search_results["WebEnv"]
query_key = search_results["QueryKey"]
batch_size = 3
filepath = os.path.join(DATAROOT, "orchid_rpl16.fasta")
out_handle = open(filepath, "w")
for start in range(0, count, batch_size):
end = min(count, start+batch_size)
print "Going to download record %i to %i" % (start+1, end)
fetch_handle = Entrez.efetch(db="nucleotide", rettype="fasta", retmode="text",
retstart=start, retmax=batch_size,
webenv=webenv, query_key=query_key)
data = fetch_handle.read()
fetch_handle.close()
out_handle.write(data)
out_handle.close()
def history_review():
"""
利用history来搜索和下载综述
"""
Entrez.email = "history.user@example.com"
search_results = Entrez.read(Entrez.esearch(db="pubmed",
term="Opuntia[ORGN]",
reldate=365, datetype="pdat",
usehistory="y"))
count = int(search_results["Count"])
print "Found %i results" % count
batch_size = 10
filepath = os.path.join(DATAROOT, "recent_orchid_papers.txt")
out_handle = open(filepath, "w")
for start in range(0, count, batch_size):
end = min(count, start+batch_size)
print "Going to download record %i to %i" % (start+1, end)
fetch_handle = Entrez.efetch(db="pubmed",
rettype="medline", retmode="text",
retstart=start, retmax=batch_size,
webenv=search_results["WebEnv"],
query_key=search_results["QueryKey"])
data = fetch_handle.read()
fetch_handle.close()
out_handle.write(data)
out_handle.close()
def history_paper():
"""
搜索引用文章
"""
Entrez.email = "A.N.Other@example.com"
pmid = "14630660"
results = Entrez.read(Entrez.elink(dbfrom="pubmed", db="pmc",
LinkName="pubmed_pmc_refs", from_uid=pmid))
pmc_ids = [link["Id"] for link in results[0]["LinkSetDb"][0]["Link"]]
print pmc_ids
results2 = Entrez.read(Entrez.elink(dbfrom="pmc", db="pubmed", LinkName="pmc_pubmed",
from_uid=",".join(pmc_ids)))
pubmed_ids = [link["Id"] for link in results2[0]["LinkSetDb"][0]["Link"]]
print pubmed_ids
def main():
fasta()
if __name__ == '__main__':
main()
|
quchunguang/test
|
testpy/testbiopython.py
|
Python
|
mit
| 9,390
|
[
"Biopython"
] |
f695bce8835966d9f4be37111bace1ff12f2414cce331490a9d5dbbb510544aa
|
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
import CogHQLoader
from toontown.toonbase import ToontownGlobals
from direct.gui import DirectGui
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from direct.fsm import State
from toontown.coghq import BossbotHQExterior
from toontown.coghq import BossbotHQBossBattle
from toontown.coghq import BossbotOfficeExterior
from toontown.coghq import CountryClubInterior
from toontown.coghq import StageInterior  # used by enterStageInterior below
from toontown.battle import BattleParticles
from pandac.PandaModules import DecalEffect, TextEncoder
import random
aspectSF = 0.7227
class BossbotCogHQLoader(CogHQLoader.CogHQLoader):
notify = DirectNotifyGlobal.directNotify.newCategory('BossbotCogHQLoader')
def __init__(self, hood, parentFSMState, doneEvent):
CogHQLoader.CogHQLoader.__init__(self, hood, parentFSMState, doneEvent)
self.fsm.addState(State.State('countryClubInterior', self.enterCountryClubInterior, self.exitCountryClubInterior, ['quietZone', 'cogHQExterior']))
for stateName in ['start', 'cogHQExterior', 'quietZone']:
state = self.fsm.getStateNamed(stateName)
state.addTransition('countryClubInterior')
self.musicFile = random.choice(['phase_12/audio/bgm/Bossbot_Entry_v1.ogg', 'phase_12/audio/bgm/Bossbot_Entry_v2.ogg', 'phase_12/audio/bgm/Bossbot_Entry_v3.ogg'])
self.cogHQExteriorModelPath = 'phase_12/models/bossbotHQ/CogGolfHub'
self.cogHQLobbyModelPath = 'phase_12/models/bossbotHQ/CogGolfCourtyard'
self.geom = None
self.rain = None
self.rainRender = None
self.rainSound = None
return
def load(self, zoneId):
CogHQLoader.CogHQLoader.load(self, zoneId)
Toon.loadBossbotHQAnims()
def startRain(self):
if not settings.get('want-particle-effects', True):
return
elif self.geom is None:
return
else:
self.rain = BattleParticles.loadParticleFile('raindisk.ptf')
self.rain.setPos(0, 0, 20)
self.rainRender = self.geom.attachNewNode('rainRender')
self.rainRender.setDepthWrite(0)
self.rainRender.setBin('fixed', 1)
self.rain.start(camera, self.rainRender)
self.rainSound = base.loadSfx('phase_12/audio/sfx/CHQ_rain_ambient.ogg')
base.playSfx(self.rainSound, looping=1, volume=0.25)
return
    def stopRain(self):
        if self.rain:
            self.rain.cleanup()
        if self.rainSound:
            self.rainSound.stop()
def unload(self):
del self.rain
del self.rainRender
del self.rainSound
Toon.unloadBossbotHQAnims()
CogHQLoader.CogHQLoader.unload(self)
def unloadPlaceGeom(self):
if self.geom:
self.geom.removeNode()
self.geom = None
CogHQLoader.CogHQLoader.unloadPlaceGeom(self)
return
def loadPlaceGeom(self, zoneId):
self.notify.info('loadPlaceGeom: %s' % zoneId)
zoneId = zoneId - zoneId % 100
self.notify.debug('zoneId = %d ToontownGlobals.BossbotHQ=%d' % (zoneId, ToontownGlobals.BossbotHQ))
if zoneId == ToontownGlobals.BossbotHQ:
self.geom = loader.loadModel(self.cogHQExteriorModelPath)
gzLinkTunnel = self.geom.find('**/LinkTunnel1')
gzLinkTunnel.setName('linktunnel_gz_17000_DNARoot')
self.makeSigns()
top = self.geom.find('**/TunnelEntrance')
origin = top.find('**/tunnel_origin')
origin.setH(-33.33)
elif zoneId == ToontownGlobals.BossbotLobby:
if config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: COGHQ: Visit BossbotLobby')
self.notify.debug('cogHQLobbyModelPath = %s' % self.cogHQLobbyModelPath)
self.geom = loader.loadModel(self.cogHQLobbyModelPath)
else:
self.notify.warning('loadPlaceGeom: unclassified zone %s' % zoneId)
CogHQLoader.CogHQLoader.loadPlaceGeom(self, zoneId)
def makeSigns(self):
def makeSign(topStr, signStr, textId):
top = self.geom.find('**/' + topStr)
sign = top.find('**/' + signStr)
locator = top.find('**/sign_origin')
signText = DirectGui.OnscreenText(text=TextEncoder.upper(TTLocalizer.GlobalStreetNames[textId][-1]), font=ToontownGlobals.getSuitFont(), scale=TTLocalizer.BCHQLsignText, fg=(0, 0, 0, 1), parent=sign)
signText.setPosHpr(locator, 0, -0.1, -0.25, 0, 0, 0)
signText.setDepthWrite(0)
makeSign('Gate_2', 'Sign_6', 10700)
makeSign('TunnelEntrance', 'Sign_2', 1000)
makeSign('Gate_3', 'Sign_3', 10600)
makeSign('Gate_4', 'Sign_4', 10500)
makeSign('GateHouse', 'Sign_5', 10200)
def getExteriorPlaceClass(self):
self.notify.debug('getExteriorPlaceClass')
return BossbotHQExterior.BossbotHQExterior
def getBossPlaceClass(self):
self.notify.debug('getBossPlaceClass')
return BossbotHQBossBattle.BossbotHQBossBattle
def enterCogHQExterior(self, requestStatus):
self.placeClass = self.getExteriorPlaceClass()
self.enterPlace(requestStatus)
self.hood.spawnTitleText(requestStatus['zoneId'])
self.startRain()
def exitCogHQExterior(self):
self.stopRain()
taskMgr.remove('titleText')
self.hood.hideTitleText()
self.exitPlace()
self.placeClass = None
return
def enterCogHQLobby(self, requestStatus):
CogHQLoader.CogHQLoader.enterCogHQLobby(self, requestStatus)
self.startRain()
self.hood.setFog()
def exitCogHQLobby(self):
CogHQLoader.CogHQLoader.exitCogHQLobby(self)
self.stopRain()
def enterCountryClubInterior(self, requestStatus):
self.placeClass = CountryClubInterior.CountryClubInterior
self.notify.info('enterCountryClubInterior, requestStatus=%s' % requestStatus)
self.countryClubId = requestStatus['countryClubId']
self.enterPlace(requestStatus)
def exitCountryClubInterior(self):
self.exitPlace()
self.placeClass = None
del self.countryClubId
return
def enterStageInterior(self, requestStatus):
self.placeClass = StageInterior.StageInterior
self.stageId = requestStatus['stageId']
self.enterPlace(requestStatus)
def exitStageInterior(self):
self.exitPlace()
self.placeClass = None
return
def enterCogHQBossBattle(self, requestStatus):
self.notify.debug('BossbotCogHQLoader.enterCogHQBossBattle')
CogHQLoader.CogHQLoader.enterCogHQBossBattle(self, requestStatus)
def exitCogHQBossBattle(self):
self.notify.debug('BossbotCogHQLoader.exitCogHQBossBattle')
CogHQLoader.CogHQLoader.exitCogHQBossBattle(self)
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/coghq/BossbotCogHQLoader.py
|
Python
|
apache-2.0
| 6,920
|
[
"VisIt"
] |
5b19d962c28ffbaaf7ab23894c617c1562dda60a79926c2bc55d35a611b0bd48
|
import re
import pytz
import datetime as dt
from collections import defaultdict
import lxml.html
from openstates.scrape import Scraper, Bill, VoteEvent
from utils import LXMLMixin
from .utils import clean_text, house_get_actor_from_action, senate_get_actor_from_action
bill_types = {
"HB ": "bill",
"HJR": "joint resolution",
"HCR": "concurrent resolution",
"SB ": "bill",
"SJR": "joint resolution",
"SCR": "concurrent resolution",
}
TIMEZONE = pytz.timezone("America/Chicago")
class MOBillScraper(Scraper, LXMLMixin):
_house_base_url = "http://www.house.mo.gov"
    # List of URLs that fail when we try to visit them (but
    # probably should work):
_bad_urls = []
_subjects = defaultdict(list)
_session_id = ""
def custom_header_func(self, url):
return {"user-agent": "openstates.org"}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
super(Scraper, self).__init__(header_func=self.custom_header_func)
self._scrape_subjects(self.latest_session())
def _get_action(self, actor, action):
        # This covers both chambers and everything else.
flags = [
("Introduced", "introduction"),
("Offered", "introduction"),
("First Read", "reading-1"),
("Read Second Time", "reading-2"),
("Second Read", "reading-2"),
# make sure passage is checked before reading-3
("Third Read and Passed", "passage"),
("Reported Do Pass", "committee-passage"),
("Voted Do Pass", "committee-passage"),
("Third Read", "reading-3"),
("Referred", "referral-committee"),
("Withdrawn", "withdrawal"),
("S adopted", "passage"),
("Truly Agreed To and Finally Passed", "passage"),
("Signed by Governor", "executive-signature"),
("Approved by Governor", "executive-signature"),
("Vetoed by Governor", "executive-veto"),
("Legislature voted to override Governor's veto", "veto-override-passage"),
]
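        # For example, a hypothetical action string "Third Read and Passed"
        # contains both the "Third Read and Passed" and "Third Read" flags
        # above, so it is classified as ["passage", "reading-3"].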
categories = []
for flag, acat in flags:
if flag in action:
categories.append(acat)
return categories or None
def _get_votes(self, date, actor, action, bill, url):
vre = r"(?P<leader>.*)(AYES|YEAS):\s+(?P<yeas>\d+)\s+(NOES|NAYS):\s+(?P<nays>\d+).*"
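        # Illustrative, made-up action string: for
        #   "Third Read and Passed (H) - AYES: 112 NOES: 34"
        # the pattern yields leader="Third Read and Passed (H) - ",
        # yeas="112", nays="34".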
if "YEAS" in action.upper() or "AYES" in action.upper():
match = re.match(vre, action)
if match:
v = match.groupdict()
yes, no = int(v["yeas"]), int(v["nays"])
vote = VoteEvent(
chamber=actor,
motion_text=v["leader"],
result="pass" if yes > no else "fail",
classification="passage",
start_date=TIMEZONE.localize(date),
bill=bill,
)
vote.add_source(url)
yield vote
def _parse_cosponsors_from_bill(self, bill, url):
bill_page = self.get(url).text
bill_page = lxml.html.fromstring(bill_page)
table = bill_page.xpath('//table[@id="CoSponsorTable"]')
assert len(table) == 1
for row in table[0].xpath("./tr"):
name = row[0].text_content()
if re.search(r"no co-sponsors", name, re.IGNORECASE):
continue
bill.add_sponsorship(
row[0].text_content(),
entity_type="person",
classification="cosponsor",
primary=False,
)
def _scrape_subjects(self, session):
self._scrape_senate_subjects(session)
if "S" in session:
self.warning("skipping house subjects for special session")
else:
self._scrape_house_subjects(session)
def session_type(self, session):
        # Map a plain year like "2020" to "R" (regular session) and
        # "...S1"/"...S2" to "E1"/"E2" (extraordinary sessions).
if len(session) == 4:
return "R"
elif "S1" in session:
return "E1"
elif "S2" in session:
return "E2"
else:
self.error("Unrecognized Session Type")
def _scrape_senate_subjects(self, session):
self.info("Collecting subject tags from upper house.")
subject_list_url = (
"http://www.senate.mo.gov/{}info/BTS_Web/"
"Keywords.aspx?SessionType={}".format(
session[2:4], self.session_type(session)
)
)
subject_page = self.lxmlize(subject_list_url)
# Create a list of all possible bill subjects.
subjects = self.get_nodes(subject_page, "//h3")
for subject in subjects:
subject_text = self.get_node(
subject, "./a[string-length(text()) > 0]/text()[normalize-space()]"
)
subject_text = re.sub(r"([\s]*\([0-9]+\)$)", "", subject_text)
# Bills are in hidden spans after the subject labels.
bill_ids = subject.getnext().xpath("./b/a/text()[normalize-space()]")
for bill_id in bill_ids:
self.info("Found {}.".format(bill_id))
self._subjects[bill_id].append(subject_text)
def _parse_senate_billpage(self, bill_url, year):
bill_page = self.lxmlize(bill_url)
# get all the info needed to record the bill
# TODO probably still needs to be fixed
bill_id = bill_page.xpath('//*[@id="lblBillNum"]')[0].text_content()
bill_title = bill_page.xpath('//*[@id="lblBillTitle"]')[0].text_content()
bill_desc = bill_page.xpath('//*[@id="lblBriefDesc"]')[0].text_content()
# bill_lr = bill_page.xpath('//*[@id="lblLRNum"]')[0].text_content()
bill_type = "bill"
triplet = bill_id[:3]
if triplet in bill_types:
bill_type = bill_types[triplet]
subs = []
bid = bill_id.replace(" ", "")
if bid in self._subjects:
subs = self._subjects[bid]
self.info("With subjects for this bill")
self.info(bid)
if bid == "XXXXXX":
self.info("Skipping Junk Bill")
return
bill = Bill(
bill_id,
title=bill_desc,
chamber="upper",
legislative_session=self._session_id,
classification=bill_type,
)
bill.subject = subs
bill.add_abstract(bill_desc, note="abstract")
bill.add_source(bill_url)
if bill_title:
bill.add_title(bill_title)
# Get the primary sponsor
try:
sponsor = bill_page.xpath('//a[@id="hlSponsor"]')[0]
except IndexError:
sponsor = bill_page.xpath('//span[@id="lSponsor"]')[0]
bill_sponsor = sponsor.text_content()
# bill_sponsor_link = sponsor.attrib.get('href')
bill.add_sponsorship(
bill_sponsor, entity_type="person", classification="primary", primary=True
)
# cosponsors show up on their own page, if they exist
cosponsor_tag = bill_page.xpath('//a[@id="hlCoSponsors"]')
if len(cosponsor_tag) > 0 and cosponsor_tag[0].attrib.get("href"):
self._parse_senate_cosponsors(bill, cosponsor_tag[0].attrib["href"])
# get the actions
action_url = bill_page.xpath('//a[@id="hlAllActions"]')
if len(action_url) > 0:
action_url = action_url[0].attrib["href"]
self._parse_senate_actions(bill, action_url)
# stored on a separate page
versions_url = bill_page.xpath('//a[@id="hlFullBillText"]')
if len(versions_url) > 0 and versions_url[0].attrib.get("href"):
self._parse_senate_bill_versions(bill, versions_url[0].attrib["href"])
amendment_links = bill_page.xpath('//a[contains(@href,"ShowAmendment.asp")]')
for link in amendment_links:
link_text = link.xpath("string(.)").strip()
if "adopted" in link_text.lower():
link_url = link.xpath("@href")[0]
bill.add_version_link(
link_text,
link_url,
media_type="application/pdf",
on_duplicate="ignore",
)
yield bill
def _parse_senate_bill_versions(self, bill, url):
bill.add_source(url)
versions_page = self.get(url).text
versions_page = lxml.html.fromstring(versions_page)
version_tags = versions_page.xpath("//li/font/a")
# some pages are updated and use different structure
if not version_tags:
version_tags = versions_page.xpath('//tr/td/a[contains(@href, ".pdf")]')
for version_tag in version_tags:
description = version_tag.text_content()
pdf_url = version_tag.attrib["href"]
if pdf_url.endswith("pdf"):
mimetype = "application/pdf"
else:
mimetype = None
bill.add_version_link(
description, pdf_url, media_type=mimetype, on_duplicate="ignore"
)
def _parse_senate_actions(self, bill, url):
bill.add_source(url)
actions_page = self.get(url).text
actions_page = lxml.html.fromstring(actions_page)
bigtable = actions_page.xpath(
"/html/body/font/form/table/tr[3]/td/div/table/tr"
)
for row in bigtable:
date = row[0].text_content()
date = dt.datetime.strptime(date, "%m/%d/%Y")
action = row[1].text_content()
actor = senate_get_actor_from_action(action)
type_class = self._get_action(actor, action)
bill.add_action(
action,
TIMEZONE.localize(date),
chamber=actor,
classification=type_class,
)
def _parse_senate_cosponsors(self, bill, url):
bill.add_source(url)
cosponsors_page = self.get(url).text
cosponsors_page = lxml.html.fromstring(cosponsors_page)
# cosponsors are all in a table
cosponsors = cosponsors_page.xpath('//table[@id="dgCoSponsors"]/tr/td/a')
for cosponsor_row in cosponsors:
# cosponsors include district, so parse that out
cosponsor_string = cosponsor_row.text_content()
cosponsor = clean_text(cosponsor_string)
cosponsor = cosponsor.split(",")[0]
            # The row links to the legislator's page as well, but we only
            # keep the cosponsor's name.
            bill.add_sponsorship(
                cosponsor,
                entity_type="person",
                classification="cosponsor",
                primary=False,
            )
def _scrape_house_subjects(self, session):
self.info("Collecting subject tags from lower house.")
subject_list_url = "http://house.mo.gov/LegislationSP.aspx?code=R&category=subjectindex&year={}".format(
session
)
subject_page = self.lxmlize(subject_list_url)
# Create a list of all the possible bill subjects.
subjects = self.get_nodes(
subject_page,
"//div[@id='ContentPlaceHolder1_panelParentDIV']" # ...
"/div[@id='panelDIV']//div[@id='ExpandedPanel']//a",
)
# Find the list of bills within each subject.
for subject in subjects:
            subject_text = re.sub(
                # the fourth positional argument of re.sub is `count`, so the
                # flag must be passed by keyword
                r"\([0-9]+\).*", "", subject.text, flags=re.IGNORECASE
            ).strip()
self.info("Searching for bills in {}.".format(subject_text))
subject_page = self.lxmlize(subject.attrib["href"])
bill_nodes = self.get_nodes(
subject_page,
'//table[@id="reportgrid"]/tbody/tr[@class="reportbillinfo"]',
)
# Move onto the next subject if no bills were found.
if bill_nodes is None or not (len(bill_nodes) > 0):
continue
for bill_node in bill_nodes:
bill_id = self.get_node(
bill_node, "(./td)[1]/a/text()[normalize-space()]"
)
# Skip to the next bill if no ID could be found.
if bill_id is None or not (len(bill_id) > 0):
continue
self.info("Found {}.".format(bill_id))
self._subjects[bill_id].append(subject_text)
def _parse_house_actions(self, bill, url):
bill.add_source(url)
actions_page = self.get(url).text
actions_page = lxml.html.fromstring(actions_page)
rows = actions_page.xpath("//table/tr")
for row in rows:
# new actions are represented by having dates in the first td
# otherwise, it's a continuation of the description from the
# previous action
if len(row) > 0 and row[0].tag == "td":
if len(row[0].text_content().strip()) > 0:
date = row[0].text_content().strip()
date = dt.datetime.strptime(date, "%m/%d/%Y")
action = row[2].text_content().strip()
else:
action += "\n" + row[2].text_content()
action = action.rstrip()
actor = house_get_actor_from_action(action)
type_class = self._get_action(actor, action)
yield from self._get_votes(date, actor, action, bill, url)
bill.add_action(
action,
TIMEZONE.localize(date),
chamber=actor,
classification=type_class,
)
def _parse_house_billpage(self, url, year):
bill_list_page = self.get(url).text
bill_list_page = lxml.html.fromstring(bill_list_page)
# find the first center tag, take the text after
# 'House of Representatives' and before 'Bills' as
# the session name
# header_tag = bill_list_page.xpath(
# '//*[@id="ContentPlaceHolder1_lblAssemblyInfo"]'
# )[0].text_content()
# if header_tag.find('1st Extraordinary Session') != -1:
# session = year + ' 1st Extraordinary Session'
# elif header_tag.find('2nd Extraordinary Session') != -1:
# session = year + ' 2nd Extraordinary Session'
# else:
session = year
bills = bill_list_page.xpath('//table[@id="reportgrid"]//tr')
isEven = False
count = 0
bills = bills[2:]
for bill in bills:
if not isEven:
                # Rows alternate: this row holds the bill link; the following
                # row holds a brief description of the bill.
count = count + 1
yield from self._parse_house_bill(bill[0][0].attrib["href"], session)
isEven = not isEven
def _parse_house_bill(self, url, session):
# using the print page makes the page simpler, and also *drastically* smaller
# (8k rather than 100k)
url = re.sub("billsummary", "billsummaryprn", url)
url = "%s/%s" % (self._house_base_url, url)
# the URL is an iframed version now, so swap in for the actual bill page
url = url.replace("Bill.aspx", "BillContent.aspx")
url = url.replace("&code=R", "&code=R&style=new")
# http://www.house.mo.gov/Bill.aspx?bill=HB26&year=2017&code=R
# http://www.house.mo.gov/BillContent.aspx?bill=HB26&year=2017&code=R&style=new
bill_page = self.get(url).text
bill_page = lxml.html.fromstring(bill_page)
bill_page.make_links_absolute(url)
bill_id = bill_page.xpath('//*[@class="entry-title"]/div')
if len(bill_id) == 0:
self.info("WARNING: bill summary page is blank! (%s)" % url)
self._bad_urls.append(url)
return
bill_id = bill_id[0].text_content()
bill_id = clean_text(bill_id)
bill_desc = bill_page.xpath('//*[@class="BillDescription"]')[0].text_content()
bill_desc = clean_text(bill_desc)
table_rows = bill_page.xpath("//table/tr")
# if there is a cosponsor all the rows are pushed down one for the extra row
# for the cosponsor:
cosponsorOffset = 0
if table_rows[2][0].text_content().strip() == "Co-Sponsor:":
cosponsorOffset = 1
lr_label_tag = table_rows[3 + cosponsorOffset]
assert lr_label_tag[0].text_content().strip() == "LR Number:"
# bill_lr = lr_label_tag[1].text_content()
lastActionOffset = 0
if (
table_rows[4 + cosponsorOffset][0].text_content().strip()
== "Governor Action:"
):
lastActionOffset = 1
official_title_tag = table_rows[5 + cosponsorOffset + lastActionOffset]
assert official_title_tag[0].text_content().strip() == "Bill String:"
official_title = official_title_tag[1].text_content()
# could substitute the description for the name,
# but keeping it separate for now.
bill_type = "bill"
triplet = bill_id[:3]
if triplet in bill_types:
bill_type = bill_types[triplet]
bill_number = int(bill_id[3:].strip())
else:
bill_number = int(bill_id[3:])
subs = []
bid = bill_id.replace(" ", "")
if bid in self._subjects:
subs = self._subjects[bid]
self.info("With subjects for this bill")
self.info(bid)
if bill_desc == "":
if bill_number <= 20:
# blank bill titles early in session are approp. bills
bill_desc = "Appropriations Bill"
else:
self.error(
"Blank title. Skipping. {} / {} / {}".format(
bill_id, bill_desc, official_title
)
)
return
bill = Bill(
bill_id,
chamber="lower",
title=bill_desc,
legislative_session=self._session_id,
classification=bill_type,
)
bill.subject = subs
bill.add_title(official_title, note="official")
bill.add_source(url)
bill_sponsor = clean_text(table_rows[0][1].text_content())
# try:
# bill_sponsor_link = table_rows[0][1][0].attrib['href']
# except IndexError:
# return
bill.add_sponsorship(
bill_sponsor, entity_type="person", classification="primary", primary=True
)
# check for cosponsors
(sponsors_url,) = bill_page.xpath(
"//a[contains(@href, 'CoSponsors.aspx')]/@href"
)
self._parse_cosponsors_from_bill(bill, sponsors_url)
# actions_link_tag = bill_page.xpath('//div[@class="Sections"]/a')[0]
# actions_link = '%s/%s' % (self._house_base_url,actions_link_tag.attrib['href'])
# actions_link = re.sub("content", "print", actions_link)
(actions_link,) = bill_page.xpath(
"//a[contains(@href, 'BillActions.aspx')]/@href"
)
yield from self._parse_house_actions(bill, actions_link)
# get bill versions
doc_tags = bill_page.xpath('//div[@class="BillDocuments"][1]/span')
for doc_tag in reversed(doc_tags):
doc = clean_text(doc_tag.text_content())
text_url = "%s%s" % (self._house_base_url, doc_tag[0].attrib["href"])
bill.add_document_link(doc, text_url, media_type="text/html")
# get bill versions
version_tags = bill_page.xpath('//div[@class="BillDocuments"][2]/span')
for version_tag in reversed(version_tags):
version = clean_text(version_tag.text_content())
for vurl in version_tag.xpath(".//a"):
if vurl.text == "PDF":
mimetype = "application/pdf"
else:
mimetype = "text/html"
bill.add_version_link(
version,
vurl.attrib["href"],
media_type=mimetype,
on_duplicate="ignore",
)
# house bill versions
# everything between the row containing "Bill Text" in an h2 and the next div.DocHeaderRow
version_rows = bill_page.xpath(
'//div[h2[contains(text(),"Bill Text")]]/'
'following-sibling::div[contains(@class,"DocRow") '
'and count(preceding-sibling::div[contains(@class,"DocHeaderRow")])=1]'
)
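        # The count(preceding-sibling::...DocHeaderRow)=1 predicate keeps only
        # the DocRow divs that follow exactly one DocHeaderRow, i.e. the rows
        # under the "Bill Text" header and before the next header row.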
for row in version_rows:
# some rows are just broken links, not real versions
if row.xpath('.//div[contains(@class,"textType")]/a/@href'):
version = row.xpath('.//div[contains(@class,"textType")]/a/text()')[
0
].strip()
path = row.xpath('.//div[contains(@class,"textType")]/a/@href')[
0
].strip()
if ".pdf" in path:
mimetype = "application/pdf"
else:
mimetype = "text/html"
bill.add_version_link(
version, path, media_type=mimetype, on_duplicate="ignore"
)
# house bill summaries
# everything between the row containing "Bill Summary" in an h2
# and the next div.DocHeaderRow
summary_rows = bill_page.xpath(
'//div[h2[contains(text(),"Bill Summary")]]/'
'following-sibling::div[contains(@class,"DocRow") '
'and count(following-sibling::div[contains(@class,"DocHeaderRow")])=1]'
)
        # if there are no amendments, we need a different xpath for summaries
if not summary_rows:
summary_rows = bill_page.xpath(
'//div[h2[contains(text(),"Bill Summary")]]/'
'following-sibling::div[contains(@class,"DocRow")]'
)
for row in reversed(summary_rows):
version = row.xpath('.//div[contains(@class,"textType")]/a/text()')[
0
].strip()
if version:
path = row.xpath('.//div[contains(@class,"textType")]/a/@href')[
0
].strip()
summary_name = "Bill Summary ({})".format(version)
if ".pdf" in path:
mimetype = "application/pdf"
else:
mimetype = "text/html"
bill.add_document_link(
summary_name, path, media_type=mimetype, on_duplicate="ignore"
)
# house bill amendments
amendment_rows = bill_page.xpath(
'//div[h2[contains(text(),"Amendment")]]/'
'following-sibling::div[contains(@class,"DocRow")]'
)
for row in reversed(amendment_rows):
version = row.xpath('.//div[contains(@class,"DocInfoCell")]/a[1]/text()')[
0
].strip()
path = row.xpath('.//div[contains(@class,"DocInfoCell")]/a[1]/@href')[
0
].strip()
summary_name = "Amendment {}".format(version)
defeated_icon = row.xpath('.//img[contains(@title,"Defeated")]')
if defeated_icon:
summary_name = "{} (Defeated)".format(summary_name)
adopted_icon = row.xpath('.//img[contains(@title,"Adopted")]')
if adopted_icon:
summary_name = "{} (Adopted)".format(summary_name)
distributed_icon = row.xpath('.//img[contains(@title,"Distributed")]')
if distributed_icon:
summary_name = "{} (Distributed)".format(summary_name)
if ".pdf" in path:
mimetype = "application/pdf"
else:
mimetype = "text/html"
bill.add_version_link(
summary_name, path, media_type=mimetype, on_duplicate="ignore"
)
yield bill
def _scrape_upper_chamber(self, session):
self.info("Scraping bills from upper chamber.")
year2 = "%02d" % (int(session[:4]) % 100)
# Save the root URL, since we'll use it later.
bill_root = "http://www.senate.mo.gov/{}info/BTS_Web/".format(year2)
index_url = (
bill_root + "BillList.aspx?SessionType=" + self.session_type(session)
)
index_page = self.get(index_url).text
index_page = lxml.html.fromstring(index_page)
        # Each bill is in its own table (nested within a larger table).
bill_tables = index_page.xpath("//a[@id]")
if not bill_tables:
return
for bill_table in bill_tables:
# Here we just search the whole table string to get the BillID that
# the MO senate site uses.
if re.search(r"dgBillList.*hlBillNum", bill_table.attrib["id"]):
yield from self._parse_senate_billpage(
bill_root + bill_table.attrib.get("href"), session
)
def _scrape_lower_chamber(self, session):
self.info("Scraping bills from lower chamber.")
if "S" in session:
year = session[:4]
code = session[4:]
else:
year = session
code = "R"
bill_page_url = "{}/BillList.aspx?year={}&code={}".format(
self._house_base_url, year, code
)
yield from self._parse_house_billpage(bill_page_url, year)
def scrape(self, chamber=None, session=None):
if not session:
session = self.latest_session()
self.info("no session specified, using %s", session)
# special sessions and other year manipulation messes up the session variable
# but we need it for correct output
self._session_id = session
# if you need to scrape an individual bill for testing:
# yield from self._parse_house_bill('BillContent.aspx?bill=HB397&year=2019&code=R',
# session)
if chamber in ["upper", None]:
yield from self._scrape_upper_chamber(session)
if chamber in ["lower", None]:
yield from self._scrape_lower_chamber(session)
if len(self._bad_urls) > 0:
self.warning("WARNINGS:")
for url in self._bad_urls:
self.warning("{}".format(url))
|
hiteshgarg14/openstates
|
scrapers/mo/bills.py
|
Python
|
gpl-3.0
| 26,867
|
[
"VisIt"
] |
7632fe8292a037ce06615a6ebe12dc13c4aaf020d3fb75d21e84c08c896dceb6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import click
import datetime
from os import path
from lib.common.config import DATA_DIR
from lib.common.utils import Echo as echo
from lib.services.api import KcwikiApi as Api
from lib.services.subtitle import SubtitleService, EnglishSubtitleParser
@click.group()
def update_cmd():
pass
def update_ships():
ships = Api.ships()
    max_id = 0
    for ship in ships:
        max_id = max(max_id, int(ship['id']))
    # build a sparse list indexed by ship id; unused ids stay as empty dicts
    results = [{} for _ in range(max_id + 1)]
for ship in ships:
_id = int(ship['id'])
try:
results[_id] = ship
except IndexError:
print(_id)
print(ship)
with open('data/ship.json', 'w') as f:
json.dump(results, f)
if len(results) > 0:
        echo.info('ship data updated successfully!')
else:
echo.warn('no data')
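# Sketch of the resulting layout (hypothetical data): ships with ids 1 and 3
# produce [{}, {'id': 1, ...}, {}, {'id': 3, ...}], so data/ship.json can be
# indexed directly by ship id.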
@update_cmd.command(name='update:ships')
def cmd_update_ships():
"""Update ships data from api.kcwiki.moe"""
update_ships()
@update_cmd.command(name='update:enemies')
def update_enemies():
"""Update enemies data from api.kcwiki.moe"""
pass
@update_cmd.command(name='update:subtitles')
@click.argument('mode', default='main')
@click.option('--scope', '-s', default='all')
@click.pass_obj
def update_subtitles(ctx, mode, scope):
"""Update kancolle musume quotes"""
service = SubtitleService()
if mode == 'main':
subtitles = service.get(scope)
now = datetime.datetime.now().strftime('%Y%m%d%H')
subtitles['zh']['version'] = now
subtitles['jp']['version'] = now
        # write each subtitle variant, closing the files deterministically
        outputs = [('zh', 'subtitles.json'), ('jp', 'subtitlesJP.json'),
                   ('tw', 'subtitlesTW.json'), ('distinct', 'subtitles_distinct.json')]
        for key, fname in outputs:
            with open(path.join(DATA_DIR, fname), 'w') as f:
                json.dump(subtitles[key], f)
elif mode == 'deploy':
service.deploy()
echo.info('Done.')
|
kcwikizh/kcwiki-scripts
|
lib/commands/update.py
|
Python
|
mit
| 2,098
|
[
"MOE"
] |
001161cceb036e64f3d03f26d6208963254a5f4c7e65d5ea4eba26c60d131f7f
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd
import numpy
import unittest as ut
class InteractionsNonBondedTest(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
box_l = 10.
start_pos = numpy.random.rand(3) * box_l
axis = numpy.random.rand(3)
axis /= numpy.linalg.norm(axis)
rel_pos = numpy.cross(numpy.random.rand(3), axis)
rel_pos /= numpy.linalg.norm(rel_pos)
def setUp(self):
self.system.box_l = [self.box_l] * 3
self.system.cell_system.skin = 0.4
self.system.time_step = 1.
self.system.part.add(id=0, pos=self.start_pos, type=0)
self.system.part.add(id=1, pos=self.start_pos + self.rel_pos, type=0)
self.system.part.add(id=2, pos=self.start_pos + self.rel_pos, type=0)
def tearDown(self):
self.system.part.clear()
def rotate_vector(self, v, k, phi):
"""Rotates vector v around unit vector k by angle phi.
Uses Rodrigues' rotation formula."""
vrot = v * numpy.cos(phi) + numpy.cross(k, v) * \
numpy.sin(phi) + k * numpy.dot(k, v) * (1 - numpy.cos(phi))
return vrot
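    # Illustrative check (not part of the test run): rotating the x-axis a
    # quarter turn about the z-axis yields the y-axis, i.e.
    # rotate_vector([1, 0, 0], [0, 0, 1], numpy.pi / 2) ~= [0, 1, 0].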
# Required, since assertAlmostEqual does NOT check significant places
    def assertFractionAlmostEqual(self, a, b, places=4):
        if abs(b) < 1E-8:
            self.assertAlmostEqual(a, b, places=places)
        else:
            self.assertAlmostEqual(a / b, 1., places=places)
def assertItemsFractionAlmostEqual(self, a, b):
for i, ai in enumerate(a):
self.assertFractionAlmostEqual(ai, b[i])
# Analytical Expression
def angle_harmonic_potential(self, phi, bend=1.0, phi0=numpy.pi):
return 0.5 * bend * numpy.power(phi - phi0, 2)
# Test Angle Harmonic Potential
@ut.skipIf(not espressomd.has_features(["BOND_ANGLE"]),
"Features not available, skipping test!")
def test_angle_harmonic(self):
ah_bend = 1.
ah_phi0 = 0.4327 * numpy.pi
angle_harmonic = espressomd.interactions.AngleHarmonic(
bend=ah_bend, phi0=ah_phi0)
self.system.bonded_inter.add(angle_harmonic)
self.system.part[0].add_bond((angle_harmonic, 1, 2))
N = 111
d_phi = numpy.pi / N
for i in range(N):
self.system.part[2].pos = self.start_pos + \
self.rotate_vector(self.rel_pos, self.axis, i * d_phi)
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["bonded"]
E_ref = self.angle_harmonic_potential(
phi=i * d_phi, bend=ah_bend, phi0=ah_phi0)
# Check that energies match
self.assertFractionAlmostEqual(E_sim, E_ref)
if __name__ == '__main__':
print("Features: ", espressomd.features())
ut.main()
|
KonradBreitsprecher/espresso
|
testsuite/interactions_bond_angle.py
|
Python
|
gpl-3.0
| 3,560
|
[
"ESPResSo"
] |
8832786c2ba4355d1f4a4b67ce90cb2f9474e5c21b7095706f4534333c8c5b9b
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2008, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
I/O facilities.
"""
__all__ = [
# modules.
'core', 'block', 'domain', 'gambit', 'netcdf', 'vtk', 'vtkxml',
'html',
# module: core.
'fioregy',
]
from .core import fioregy
|
yungyuc/solvcon
|
solvcon/io/__init__.py
|
Python
|
bsd-3-clause
| 1,782
|
[
"NetCDF",
"VTK"
] |
73fc499e5f9a4225423b2d6691993bea12309c755fe1ede446e9a1150d377065
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2008, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Definition of the structure of solvers.
"""
from numbers import Number
from ctypes import Structure
from .gendata import TypeWithBinder
from .solver_core import ALMOST_ZERO
class BaseSolverExedata(Structure):
"""
Execution information for BaseSolver.
"""
from ctypes import c_int, c_double
_fields_ = [
('ncore', c_int), ('neq', c_int),
('time', c_double), ('time_increment', c_double),
]
del c_int, c_double
def __init__(self, *args, **kw):
svr = kw.pop('svr', None)
super(BaseSolverExedata, self).__init__(*args, **kw)
        if svr is None:
return
for key in ('ncore', 'neq', 'time', 'time_increment'):
setattr(self, key, getattr(svr, key))
class BaseSolver(metaclass=TypeWithBinder):
"""
Generic solver definition. It is an abstract class and should not be used
to any concrete simulation case. The concrete solver sub-classes should
override the empty init and final methods for initialization and
finalization, respectively.
@cvar _clib_solve: the external dll (accessible through ctypes) which do
the cell loop. Subclass should override it.
@ctype _clib_solve: ctypes.CDLL
@cvar _exedatatype_: the C struct definition in ctypes.Structure.
@ctype _exedatatype_: ctypes.Structure
    @cvar MESG_FILENAME_DEFAULT: the default file name for serial solver
        object.
@ivar _fpdtype: dtype for the floating point data in the block instance.
@itype _fpdtype: numpy.dtype
@ivar enable_mesg: flag if mesg device should be enabled.
@itype enable_mesg: bool
@ivar mesg: message printer attached to a certain solver object; designed
and mainly used for parallel solver.
@itype mesg: solvcon.helper.Printer
@ivar runanchors: the list for the anchor objects to be run.
@itype runanchors: solvcon.anchor.AnchorList
@ivar exd: execution information for the solver.
@itype exd: ctypes.Structure
@ivar enable_tpool: flag to enable thread pool on binding.
@itype enable_tpool: bool
@ivar tpool: thread pool for solver.
@itype tpool: solvcon.mthread.ThreadPool
@ivar arglists: argument lists for C functions to be executed in the
thread pool.
@itype arglists: list
@ivar mmnames: marching methods name.
@itype mmnames: list
@ivar marchret: return value set for march.
@ivar der: the dictionary to put derived data arrays. Mostly used by
Anchors.
@itype der: dict
"""
_pointers_ = ['exd', 'tpool', 'arglists']
_exedatatype_ = BaseSolverExedata
_clib_solve = None # subclass should override.
MESG_FILENAME_DEFAULT = 'solvcon.solver.log'
MMNAMES = []
def __init__(self, **kw):
from .conf import env
from .anchor import AnchorList
from .gendata import Timer
self._fpdtype = kw.pop('fpdtype', env.fpdtype)
        self._fpdtype = env.fpdtype if self._fpdtype is None else self._fpdtype
self.enable_mesg = kw.pop('enable_mesg', False)
self.mesg = None
self.enable_tpool = kw.pop('enable_tpool', True)
self.ncore = kw.pop('ncore', -1)
self.neq = kw.pop('neq')
self.time = kw.pop('time', 0.0)
self.time_increment = kw.pop('time_increment', 0.0)
self.step_global = 0
self.step_current = 0
self.substep_current = 0
self.substep_run = kw.pop('substep_run', 2)
# anchor list.
self.runanchors = AnchorList(self)
# data structure for C/FORTRAN.
self.exd = None
self.tpool = None
self.arglists = None
# marching methods name.
self.mmnames = self.MMNAMES[:]
self.marchret = None
# timer.
self.timer = Timer(vtype=float)
self.ticker = dict()
# derived data.
self.der = dict()
@property
def fpdtype(self):
import numpy
_fpdtype = self._fpdtype
if isinstance(_fpdtype, str):
return getattr(numpy, _fpdtype)
else:
return self._fpdtype
@property
def fpdtypestr(self):
from .dependency import str_of
return str_of(self.fpdtype)
@property
def _clib_solvcon(self):
from .dependency import _clib_solvcon_of
return _clib_solvcon_of(self.fpdtype)
@staticmethod
def detect_ncore():
f = open('/proc/stat')
data = f.read()
f.close()
cpulist = [line for line in data.split('\n') if
line.startswith('cpu')]
cpulist = [line for line in cpulist if line.split()[0] != 'cpu']
return len(cpulist)
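    # Note: /proc/stat begins with an aggregate "cpu" line followed by one
    # "cpuN" line per logical core, e.g. (illustrative values):
    #   cpu  2255 34 2290 22625563 ...
    #   cpu0 1132 17 1145 11312781 ...
    # detect_ncore() counts only the per-core "cpuN" lines.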
def __create_mesg(self, force=False):
"""
        Create the message outputting device, which is intended for debugging
        and outputting messages related to the solver. The outputting device
        is most useful when running distributed solvers. The created device
        will be attached to self.
        @keyword force: flag to force the creation. Default False.
@type force: bool
@return: nothing
"""
import os
from .helper import Printer
if force: self.enable_mesg = True
if self.enable_mesg:
            if self.svrn is not None:
main, ext = os.path.splitext(self.MESG_FILENAME_DEFAULT)
tmpl = main + '%d' + ext
dfn = tmpl % self.svrn
dprefix = 'SOLVER%d: ' % self.svrn
else:
dfn = self.MESG_FILENAME_DEFAULT
dprefix = ''
else:
dfn = os.devnull
dprefix = ''
self.mesg = Printer(dfn, prefix=dprefix, override=True)
def dump(self, objfn):
"""
Pickle self into the given filename.
@parameter objfn: the output filename.
@type objfn: str
"""
        import pickle
holds = dict()
self.unbind()
for key in ['mesg',]:
holds[key] = getattr(self, key)
setattr(self, key, None)
        with open(objfn, 'wb') as objf:
            pickle.dump(self, objf, pickle.HIGHEST_PROTOCOL)
for key in holds:
setattr(self, key, holds[key])
self.bind()
def _tcall(self, *args, **kw):
"""
Use thread pool to call C functions in parallel (shared-memory).
"""
if not self.tpool:
raise RuntimeError('tpool is not available in %s'%str(self))
from ctypes import byref, c_int
from numpy import zeros
ncore = self.ncore
cfunc = args[0]
iter_start = args[1]
iter_end = args[2]
tickerkey = kw.pop('tickerkey', None)
if ncore > 0:
if len(args)>3:
alsts = list()
for it in range(self.ncore):
alst = [byref(self.exd), c_int(0), c_int(0)]
alst.extend(args[3:])
alsts.append(alst)
else:
alsts = self.arglists
            incre = (iter_end-iter_start)//ncore + 1
istart = iter_start
for it in range(ncore):
iend = min(istart+incre, iter_end)
alsts[it][1].value = istart
alsts[it][2].value = iend
istart = iend
ret = self.tpool(cfunc, alsts)
else:
alst = [byref(self.exd), c_int(iter_start), c_int(iter_end)]
alst.extend(args[3:])
ret = [cfunc(*alst)]
        if tickerkey is not None:
if tickerkey not in self.ticker:
self.ticker[tickerkey] = zeros(len(ret), dtype='int32')
for it in range(len(ret)):
self.ticker[tickerkey][it] += ret[it]
return ret
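    # Partitioning sketch (hypothetical numbers): with ncore = 4 and an
    # iteration range of [0, 100), incre = 100 // 4 + 1 = 26, so the worker
    # ranges become [0, 26), [26, 52), [52, 78) and [78, 100).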
def bind(self):
"""
Put everything that cannot be pickled, such as file objects, ctypes
pointers, etc., into self.
@return: nothing
"""
import sys
from ctypes import byref, c_int
from solvcon.mthread import ThreadPool
# create message device.
        if self.mesg is None: self.__create_mesg()
# detect number of cores.
        if self.ncore == -1 and sys.platform.startswith('linux'):
self.ncore = self.detect_ncore()
# create executional data.
exdtype = self._exedatatype_
self.exd = exdtype(svr=self)
# create thread pool.
if self.enable_tpool:
self.tpool = ThreadPool(nthread=self.ncore)
self.arglists = list()
for it in range(self.ncore):
self.arglists.append([byref(self.exd), c_int(0), c_int(0)])
def init(self, **kw):
pass
def final(self):
self.unbind()
def provide(self):
self.runanchors('provide')
def preloop(self):
self.runanchors('preloop')
def postloop(self):
self.runanchors('postloop')
def exhaust(self):
self.runanchors('exhaust')
def _set_time(self, time, time_increment):
"""
Set the time for self and structures.
"""
self.exd.time = self.time = time
self.exd.time_increment = self.time_increment = time_increment
def march(self, time, time_increment, steps_run, worker=None):
"""
Default marcher for the solver object.
@param time: starting time of marching.
@type time: float
@param time_increment: temporal interval for the time step.
@type time_increment: float
@param steps_run: the count of time steps to run.
@type steps_run: int
@return: arbitrary return value.
@rtype: float
"""
from time import time as _time
self.marchret = dict()
self.step_current = 0
self.runanchors('premarch')
while self.step_current < steps_run:
self.substep_current = 0
self.runanchors('prefull')
t0= _time()
while self.substep_current < self.substep_run:
self._set_time(time, time_increment)
self.runanchors('presub')
# run marching methods.
for mmname in self.mmnames:
method = getattr(self, mmname)
t1 = _time()
self.runanchors('pre'+mmname)
t2 = _time()
method(worker=worker)
self.timer.increase(mmname, _time() - t2)
self.runanchors('post'+mmname)
self.timer.increase(mmname+'_a', _time() - t1)
# increment time.
time += time_increment/self.substep_run
self._set_time(time, time_increment)
self.substep_current += 1
self.runanchors('postsub')
self.timer.increase('march', _time() - t0)
self.step_global += 1
self.step_current += 1
self.runanchors('postfull')
self.runanchors('postmarch')
if worker:
worker.conn.send(self.marchret)
return self.marchret
class FakeBlockVtk(object):
"""
Faked block from solver for being used by VTK.
"""
def __init__(self, svr):
self.ndim = svr.ndim
self.nnode = svr.nnode
self.ncell = svr.ncell
self.ndcrd = svr.ndcrd[svr.ngstnode:]
self.clnds = svr.clnds[svr.ngstcell:]
self.cltpn = svr.cltpn[svr.ngstcell:]
self.fpdtype = svr.fpdtype
class BlockSolver(BaseSolver):
"""
Generic class for multi-dimensional (implemented with Block)
sequential/parallel solvers. Meta, metric, and connectivity data arrays
are absorbed into the instance of this class.
Before the invocation of init() method, bind() method must be called.
    @note: When subclassing BlockSolver, the init() method of the subclass
    must perform its own initialization first, and only then call
    super().init(). Otherwise the BCs can't set correct information on the
    solver.
@cvar _interface_init_: list of attributes (arrays) to be exchanged on
interface when initialized.
@ctype _interface_init_: list
@ivar ibcthread: flag if using threads.
@itype ibcthread: bool
@ivar svrn: serial number of solver object.
@itype svrn: int
@ivar nsvr: number of solver objects.
@itype nsvr: int
@ivar grpnames: list of names of groups.
@itype grpnames: list
@ivar ngroup: number of groups.
@itype ngroup: int
@ivar bclist: list of BCs.
@itype bclist: list
@ivar ibclist: list of interface BCs.
@itype ibclist: list
@ivar all_simplex: True if the mesh is all-simplex, False otherwise.
@itype all_simplex: bool
@ivar use_incenter: True if the mesh uses incenters, False otherwise.
@itype use_incenter: bool
"""
_interface_init_ = []
_solution_array_ = []
from .block import Block
FCMND = Block.FCMND
CLMND = Block.CLMND
CLMFC = Block.CLMFC
del Block
def __init__(self, blk, *args, **kw):
from numpy import empty
self.ibcthread = kw.pop('ibcthread', False)
super(BlockSolver, self).__init__(*args, **kw)
assert self.fpdtype == blk.fpdtype
self.all_simplex = blk.check_simplex()
self.use_incenter = blk.use_incenter
# index.
self.svrn = blk.blkn
self.nsvr = None
# group.
self.grpnames = blk.grpnames
self.ngroup = len(self.grpnames)
# BCs.
self.bclist = blk.bclist
for bc in self.bclist:
bc.blk = None
bc.svr = self
self.ibclist = None
# mesh shape.
self.ndim = blk.ndim
self.nnode = blk.nnode
self.nface = blk.nface
self.ncell = blk.ncell
self.nbound = blk.nbound
self.ngstnode = blk.ngstnode
self.ngstface = blk.ngstface
self.ngstcell = blk.ngstcell
# meta array.
self.fctpn = blk.shfctpn
self.cltpn = blk.shcltpn
self.clgrp = blk.shclgrp
## connectivity.
self.clnds = blk.shclnds
self.clfcs = blk.shclfcs
self.fcnds = blk.shfcnds
self.fccls = blk.shfccls
## geometry.
self.ndcrd = blk.shndcrd
self.fccnd = blk.shfccnd
self.fcara = blk.shfcara
self.fcnml = blk.shfcnml
self.clcnd = blk.shclcnd
self.clvol = blk.shclvol
# in situ visualization by VTK.
self._ust = None
@property
def ust(self):
from .visual_vtk import make_ust_from_blk
_ust = self._ust
if _ust is None:
fbk = FakeBlockVtk(self)
_ust = make_ust_from_blk(fbk)
self._ust = _ust
return _ust
def bind(self):
"""
Bind all the boundary condition objects.
        @note: BC must be bound AFTER solver "pointers". Overriders of this
        method should first bind all pointers, then call the super binder,
        and finally bind methods/subroutines.
"""
super(BlockSolver, self).bind()
# boundary conditions.
for bc in self.bclist:
bc.bind()
def unbind(self):
"""
Unbind all the boundary condition objects.
"""
super(BlockSolver, self).unbind()
for bc in self.bclist:
bc.unbind()
@property
def is_bound(self):
"""
Check boundness for solver as well as BC objects.
"""
if not super(BlockSolver, self).is_bound:
return False
else:
for bc in self.bclist:
if not bc.is_bound:
return False
return True
@property
def is_unbound(self):
"""
Check unboundness for solver as well as BC objects.
"""
if not super(BlockSolver, self).is_unbound:
return False
else:
for bc in self.bclist:
if not bc.is_unbound:
return False
return True
def init(self, **kw):
"""
Check and initialize BCs.
"""
for arrname in self._solution_array_:
arr = getattr(self, arrname)
            arr.fill(ALMOST_ZERO)  # guard against an initializer that forgets to set values
for bc in self.bclist:
bc.init(**kw)
super(BlockSolver, self).init(**kw)
def boundcond(self):
"""
Update the boundary conditions.
@return: nothing.
"""
pass
def call_non_interface_bc(self, name, *args, **kw):
"""
Call method of each of non-interface BC objects in my list.
@param name: name of the method of BC to call.
@type name: str
@return: nothing
"""
from .boundcond import interface
bclist = [bc for bc in self.bclist if not isinstance(bc, interface)]
for bc in bclist:
try:
getattr(bc, name)(*args, **kw)
except Exception as e:
e.args = tuple([str(bc), name] + list(e.args))
raise
##################################################
# parallelization.
##################################################
def remote_setattr(self, name, var):
"""
Remotely set attribute of worker.
"""
return setattr(self, name, var)
def pull(self, arrname, inder=False, worker=None):
"""
Pull data array to dealer (rpc) through worker object.
@param arrname: the array to pull to master.
@type arrname: str
@param inder: the data array is derived data array.
@type inder: bool
@keyword worker: the worker object for communication.
@type worker: solvcon.rpc.Worker
@return: nothing.
"""
conn = worker.conn
if inder:
arr = self.der[arrname]
else:
arr = getattr(self, arrname)
conn.send(arr)
def push(self, marr, arrname, start=0, inder=False):
"""
Push data array received from dealer (rpc) into self.
@param marr: the array passed in.
@type marr: numpy.ndarray
@param arrname: the array to pull to master.
@type arrname: str
@param start: the starting index of pushing.
@type start: int
@param inder: the data array is derived data array.
@type inder: bool
@return: nothing.
"""
if inder:
arr = self.der[arrname]
else:
arr = getattr(self, arrname)
arr[start:] = marr[start:]
def pullank(self, ankname, objname, worker=None):
"""
Pull data array to dealer (rpc) through worker object.
@param ankname: the name of related anchor.
@type ankname: str
@param objname: the object to pull to master.
@type objname: str
@keyword worker: the worker object for communication.
@type worker: solvcon.rpc.Worker
@return: nothing.
"""
conn = worker.conn
obj = getattr(self.runanchors[ankname], objname)
conn.send(obj)
def init_exchange(self, ifacelist):
from .boundcond import interface
# grab peer index.
ibclist = list()
for pair in ifacelist:
if isinstance(pair, Number) and pair < 0:
ibclist.append(pair)
else:
assert len(pair) == 2
assert self.svrn in pair
ibclist.append(sum(pair)-self.svrn)
# replace with BC object, sendn and recvn.
for bc in self.bclist:
if not isinstance(bc, interface):
continue
it = ibclist.index(bc.rblkn)
sendn, recvn = ifacelist[it]
ibclist[it] = bc, sendn, recvn
self.ibclist = ibclist
def exchangeibc(self, arrname, worker=None):
from time import sleep
from threading import Thread
threads = list()
for ibc in self.ibclist:
# check if sleep or not.
if isinstance(ibc, Number) and ibc < 0:
continue
bc, sendn, recvn = ibc
# determine callable and arguments.
if self.svrn == sendn:
target = self.pushibc
args = arrname, bc, recvn
elif self.svrn == recvn:
target = self.pullibc
args = arrname, bc, sendn
else:
raise ValueError('bc.rblkn = %d != %d or %d' % (
bc.rblkn, sendn, recvn))
kwargs = {'worker': worker}
# call to data transfer.
if self.ibcthread:
threads.append(Thread(
target=target,
args=args,
kwargs=kwargs,
))
threads[-1].start()
else:
target(*args, **kwargs)
if self.ibcthread:
for thread in threads:
thread.join()
def pushibc(self, arrname, bc, recvn, worker=None):
"""
Push data toward selected interface which connect to blocks with larger
serial number than myself.
@param arrname: name of the array in the object to exchange.
@type arrname: str
@param bc: the interface BC to push.
@type bc: solvcon.boundcond.interface
@param recvn: serial number of the peer to exchange data with.
@type recvn: int
@keyword worker: the wrapping worker object for parallel processing.
@type worker: solvcon.rpc.Worker
"""
from numpy import empty
conn = worker.pconns[bc.rblkn]
ngstcell = self.ngstcell
arr = getattr(self, arrname)
# ask the receiver for data.
shape = list(arr.shape)
shape[0] = bc.rclp.shape[0]
rarr = empty(shape, dtype=arr.dtype)
conn.recvarr(rarr) # comm.
slct = bc.rclp[:,0] + ngstcell
arr[slct] = rarr[:]
# provide the receiver with data.
slct = bc.rclp[:,2] + ngstcell
conn.sendarr(arr[slct]) # comm.
def pullibc(self, arrname, bc, sendn, worker=None):
"""
Pull data from the interface determined by the serial of peer.
@param arrname: name of the array in the object to exchange.
@type arrname: str
@param bc: the interface BC to pull.
@type bc: solvcon.boundcond.interface
@param sendn: serial number of the peer to exchange data with.
@type sendn: int
@keyword worker: the wrapping worker object for parallel processing.
@type worker: solvcon.rpc.Worker
"""
from numpy import empty
conn = worker.pconns[bc.rblkn]
ngstcell = self.ngstcell
arr = getattr(self, arrname)
# provide sender the data.
slct = bc.rclp[:,2] + ngstcell
conn.sendarr(arr[slct]) # comm.
# ask data from sender.
shape = list(arr.shape)
shape[0] = bc.rclp.shape[0]
rarr = empty(shape, dtype=arr.dtype)
conn.recvarr(rarr) # comm.
slct = bc.rclp[:,0] + ngstcell
arr[slct] = rarr[:]
|
yungyuc/solvcon
|
solvcon/solver_legacy.py
|
Python
|
bsd-3-clause
| 24,796
|
[
"VTK"
] |
9a851408d9d51b079694026af15513bcdd01c5d3648bb041100609187e2f6118
|
from __future__ import print_function
import unittest
import os
import io
from rdkit.six.moves import cPickle as pickle
from rdkit import Chem
from rdkit.Chem import rdPartialCharges
from rdkit import RDConfig
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
class TestCase(unittest.TestCase):
def setUp(self) :
pass
def test0HalgrenSet(self) :
smiSup = Chem.SmilesMolSupplier(os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','PartialCharges','Wrap','test_data','halgren.smi'),delimiter='\t')
#parse the original file
with open(os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','PartialCharges','Wrap','test_data','halgren_out.txt'),
'r') as infil:
lines = infil.readlines()
tab = Chem.GetPeriodicTable()
olst = []
for mol in smiSup :
rdPartialCharges.ComputeGasteigerCharges(mol)
tstr = "Molecule: "
tstr += mol.GetProp("_Name")
olst.append(tstr)
for i in range(mol.GetNumAtoms()) :
at = mol.GetAtomWithIdx(i)
en = tab.GetElementSymbol(at.GetAtomicNum())
chg = float(at.GetProp("_GasteigerCharge"))
tstr = "%i %s %6.4f"%(i, en, chg)
olst.append(tstr)
    for i, line in enumerate(lines):
      self.assertTrue(line.strip() == olst[i])
def test1PPDataset(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','PartialCharges','Wrap','test_data', 'PP_descrs_regress.2.csv')
    with open(fileN, 'r') as infil:
      lines = infil.readlines()
infile = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','PartialCharges','Wrap','test_data', 'PP_combi_charges.pkl')
    with open(infile, 'r') as cchtFile:
      buf = cchtFile.read().replace('\r\n', '\n').encode('utf-8')
with io.BytesIO(buf) as cchFile:
combiCharges = pickle.load(cchFile)
for lin in lines :
if (lin[0] == '#') :
continue
tlst = lin.strip().split(',')
smi = tlst[0]
rdmol = Chem.MolFromSmiles(smi)
rdPartialCharges.ComputeGasteigerCharges(rdmol)
nat = rdmol.GetNumAtoms()
failed=False
for ai in range(nat) :
rdch = float(rdmol.GetAtomWithIdx(ai).GetProp('_GasteigerCharge'))
if not feq(rdch, combiCharges[smi][ai], 1.e-2) :
failed=True
print(smi, ai, rdch, combiCharges[smi][ai])
if failed: rdmol.Debug()
self.assertFalse(failed)
def test2Params(self):
""" tests handling of Issue187 """
m1 = Chem.MolFromSmiles('C(=O)[O-]')
rdPartialCharges.ComputeGasteigerCharges(m1)
m2 = Chem.MolFromSmiles('C(=O)[O-].[Na+]')
rdPartialCharges.ComputeGasteigerCharges(m2)
for i in range(m1.GetNumAtoms()):
c1 = float(m1.GetAtomWithIdx(i).GetProp('_GasteigerCharge'))
c2 = float(m2.GetAtomWithIdx(i).GetProp('_GasteigerCharge'))
self.assertTrue(feq(c1,c2,1e-4))
def test3Params(self):
""" tests handling of Issue187 """
m2 = Chem.MolFromSmiles('C(=O)[O-].[Na+]')
with self.assertRaisesRegexp(Exception, ""):
rdPartialCharges.ComputeGasteigerCharges(m2,12,1)
def testGithubIssue20(self):
""" tests handling of Github issue 20 """
m1 = Chem.MolFromSmiles('CB(O)O')
rdPartialCharges.ComputeGasteigerCharges(m1)
chgs=[-0.030,0.448,-0.427,-0.427]
for i in range(m1.GetNumAtoms()):
c1 = float(m1.GetAtomWithIdx(i).GetProp('_GasteigerCharge'))
self.assertAlmostEqual(c1,chgs[i],3)
def testGithubIssue577(self):
""" tests handling of Github issue 577 """
m1 = Chem.MolFromSmiles('CCO')
from locale import setlocale, LC_NUMERIC
try:
setlocale(LC_NUMERIC, "de_DE")
except Exception:
# can't set the required locale, might as well just return
return
try:
rdPartialCharges.ComputeGasteigerCharges(m1)
for at in m1.GetAtoms():
float(at.GetProp('_GasteigerCharge'))
finally:
setlocale(LC_NUMERIC, "C")
rdPartialCharges.ComputeGasteigerCharges(m1)
for at in m1.GetAtoms():
float(at.GetProp('_GasteigerCharge'))
if __name__== '__main__':
unittest.main()
|
adalke/rdkit
|
Code/GraphMol/PartialCharges/Wrap/testPartialCharges.py
|
Python
|
bsd-3-clause
| 4,705
|
[
"RDKit"
] |
c069257a47f57f6700874caf26762c6b9bd2c35fe0b00cdfeb7024a5233e6e9d
|
'''
=========================================
Inference for Non-Linear Gaussian Systems
=========================================
This module contains the Unscented Kalman Filter (Wan, van der Merwe 2000)
for state estimation in systems with non-Gaussian noise and non-linear dynamics
'''
from collections import namedtuple
import numpy as np
from numpy import ma
from scipy import linalg
from .utils import array1d, array2d, check_random_state, get_params, preprocess_arguments, check_random_state
from .standard import _last_dims, _determine_dimensionality, _arg_or_default
# represents a collection of sigma points and their associated weights. one
# point per row
SigmaPoints = namedtuple(
'SigmaPoints',
['points', 'weights_mean', 'weights_covariance']
)
# represents mean and covariance of a multivariate normal distribution
Moments = namedtuple('Moments', ['mean', 'covariance'])
def points2moments(points, sigma_noise=None):
'''Calculate estimated mean and covariance of sigma points
Parameters
----------
points : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
SigmaPoints object containing points and weights
sigma_noise : [n_dim_state, n_dim_state] array
additive noise covariance matrix, if any
Returns
-------
moments : Moments object of size [n_dim_state]
Mean and covariance estimated using points
'''
(points, weights_mu, weights_sigma) = points
mu = points.T.dot(weights_mu)
points_diff = points.T - mu[:, np.newaxis]
sigma = points_diff.dot(np.diag(weights_sigma)).dot(points_diff.T)
if sigma_noise is not None:
sigma = sigma + sigma_noise
return Moments(mu.ravel(), sigma)
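# Worked sketch for points2moments (hand-made values): two equally weighted
# scalar points 0 and 2 have mean 1 and variance 0.5*(0-1)**2 + 0.5*(2-1)**2 = 1:
#   pts = SigmaPoints(np.array([[0.], [2.]]), np.ones(2) / 2, np.ones(2) / 2)
#   points2moments(pts)  # -> Moments(mean=array([1.]), covariance=array([[1.]]))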
def moments2points(moments, alpha=None, beta=None, kappa=None):
'''Calculate "sigma points" used in Unscented Kalman Filter
Parameters
----------
moments : [n_dim] Moments object
mean and covariance of a multivariate normal
alpha : float
Spread of the sigma points. Typically 1e-3.
beta : float
Used to "incorporate prior knowledge of the distribution of the state".
        2 is optimal if the state is normally distributed.
kappa : float
        secondary scaling parameter; this code defaults it to 3 - n_dim
Returns
-------
points : [2*n_dim+1, n_dim] SigmaPoints
sigma points and associated weights
'''
(mu, sigma) = moments
n_dim = len(mu)
mu = array2d(mu, dtype=float)
if alpha is None:
alpha = 1.0
if beta is None:
beta = 0.0
if kappa is None:
kappa = 3.0 - n_dim
# compute sqrt(sigma)
sigma2 = linalg.cholesky(sigma).T
# Calculate scaling factor for all off-center points
lamda = (alpha * alpha) * (n_dim + kappa) - n_dim
c = n_dim + lamda
# calculate the sigma points; that is,
# mu
# mu + each column of sigma2 * sqrt(c)
# mu - each column of sigma2 * sqrt(c)
# Each column of points is one of these.
points = np.tile(mu.T, (1, 2 * n_dim + 1))
points[:, 1:(n_dim + 1)] += sigma2 * np.sqrt(c)
points[:, (n_dim + 1):] -= sigma2 * np.sqrt(c)
# Calculate weights
weights_mean = np.ones(2 * n_dim + 1)
weights_mean[0] = lamda / c
weights_mean[1:] = 0.5 / c
weights_cov = np.copy(weights_mean)
weights_cov[0] = lamda / c + (1 - alpha * alpha + beta)
return SigmaPoints(points.T, weights_mean, weights_cov)
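# Sketch (assumed usage): for a Gaussian, moments2points followed by
# points2moments recovers the original moments up to floating point error:
#   m = Moments(np.zeros(2), np.eye(2))
#   points2moments(moments2points(m))  # mean ~ zeros(2), covariance ~ eye(2)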
def unscented_transform(points, f=None, points_noise=None, sigma_noise=None):
'''Apply the Unscented Transform to a set of points
Apply f to points (with secondary argument points_noise, if available),
then approximate the resulting mean and covariance. If sigma_noise is
available, treat it as additional variance due to additive noise.
Parameters
----------
points : [n_points, n_dim_state] SigmaPoints
points to pass into f's first argument and associated weights if f is
defined. If f is unavailable, then f is assumed to be the identity
function.
f : [n_dim_state, n_dim_state_noise] -> [n_dim_state] function
transition function from time t to time t+1, if available.
points_noise : [n_points, n_dim_state_noise] array
points to pass into f's second argument, if any
sigma_noise : [n_dim_state, n_dim_state] array
covariance matrix for additive noise, if any
Returns
-------
points_pred : [n_points, n_dim_state] SigmaPoints
points transformed by f with same weights
moments_pred : [n_dim_state] Moments
moments associated with points_pred
'''
n_points, n_dim_state = points.points.shape
(points, weights_mean, weights_covariance) = points
# propagate points through f
if f is not None:
if points_noise is None:
points_pred = [f(points[i]) for i in range(n_points)]
else:
points_noise = points_noise.points
points_pred = [f(points[i], points_noise[i]) for i in range(n_points)]
else:
points_pred = points
# make each row a predicted point
points_pred = np.vstack(points_pred)
points_pred = SigmaPoints(points_pred, weights_mean, weights_covariance)
# calculate approximate mean, covariance
moments_pred = points2moments(points_pred, sigma_noise)
return (points_pred, moments_pred)
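# Sketch (hypothetical usage): for a linear map the transform is exact, e.g.
# pushing a standard normal through f(x) = 2 * x keeps the mean at 0 and
# quadruples the variance:
#   pts = moments2points(Moments(np.zeros(1), np.eye(1)))
#   _, m = unscented_transform(pts, f=lambda x: 2 * x)
#   # m.mean ~ [0.], m.covariance ~ [[4.]]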
def unscented_correct(cross_sigma, moments_pred, obs_moments_pred, z):
'''Correct predicted state estimates with an observation
Parameters
----------
cross_sigma : [n_dim_state, n_dim_obs] array
cross-covariance between the state at time t given all observations
from timesteps [0, t-1] and the observation at time t
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t given observations from
timesteps [0, t-1]
obs_moments_pred : [n_dim_obs] Moments
mean and covariance of observation at time t given observations from
times [0, t-1]
z : [n_dim_obs] array
observation at time t
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t given observations from time
steps [0, t]
'''
mu_pred, sigma_pred = moments_pred
obs_mu_pred, obs_sigma_pred = obs_moments_pred
n_dim_state = len(mu_pred)
n_dim_obs = len(obs_mu_pred)
if not np.any(ma.getmask(z)):
# calculate Kalman gain
K = cross_sigma.dot(linalg.pinv(obs_sigma_pred))
# correct mu, sigma
mu_filt = mu_pred + K.dot(z - obs_mu_pred)
sigma_filt = sigma_pred - K.dot(cross_sigma.T)
else:
# no corrections to be made
mu_filt = mu_pred
sigma_filt = sigma_pred
return Moments(mu_filt, sigma_filt)
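# Scalar sketch (hypothetical numbers): with sigma_pred = 1, cross_sigma = 0.5
# and obs_sigma_pred = 1 the gain is K = 0.5, so observing z = 2 against a
# predicted observation of 0 moves the mean from 0 to 1 and shrinks the
# variance to 1 - 0.5 * 0.5 = 0.75.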
def augmented_points(momentses):
'''Calculate sigma points for augmented UKF
Parameters
----------
momentses : list of Moments
means and covariances for multiple multivariate normals
Returns
-------
pointses : list of Points
sigma points for each element of momentses
'''
# stack everything together
means, covariances = zip(*momentses)
mu_aug = np.concatenate(means)
sigma_aug = linalg.block_diag(*covariances)
moments_aug = Moments(mu_aug, sigma_aug)
# turn augmented representation into sigma points
points_aug = moments2points(moments_aug)
# unstack everything
dims = [len(m) for m in means]
result = []
start = 0
for i in range(len(dims)):
end = start + dims[i]
part = SigmaPoints(
points_aug.points[:, start:end],
points_aug.weights_mean,
points_aug.weights_covariance
)
result.append(part)
start = end
# return
return result
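# Sketch (assumed shapes): augmenting a 2-D state with 1-D transition noise
# stacks the means into a length-3 vector and the covariances into a 3x3
# block-diagonal matrix, draws 2 * 3 + 1 = 7 sigma points, and then splits
# them back into a [7, 2] and a [7, 1] SigmaPoints pair.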
def augmented_unscented_filter_points(mean_state, covariance_state,
covariance_transition,
covariance_observation):
"""Extract sigma points using augmented state representation
Primarily used as a pre-processing step before predicting and updating in
the Augmented UKF.
Parameters
----------
mean_state : [n_dim_state] array
mean of state at time t given observations from time steps 0...t
covariance_state : [n_dim_state, n_dim_state] array
covariance of state at time t given observations from time steps 0...t
covariance_transition : [n_dim_state, n_dim_state] array
covariance of zero-mean noise resulting from transitioning from time
step t to t+1
covariance_observation : [n_dim_obs, n_dim_obs] array
covariance of zero-mean noise resulting from observation state at time
t+1
Returns
-------
points_state : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for state at time t
points_transition : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for transition noise between time t and t+1
points_observation : [2 * n_dim_state + 1, n_dim_obs] SigmaPoints
sigma points for observation noise at time step t+1
"""
# get sizes of dimensions
n_dim_state = covariance_state.shape[0]
n_dim_obs = covariance_observation.shape[0]
# extract sigma points using augmented representation
state_moments = Moments(mean_state, covariance_state)
transition_noise_moments = (
Moments(np.zeros(n_dim_state), covariance_transition)
)
observation_noise_moments = (
Moments(np.zeros(n_dim_obs), covariance_observation)
)
(points_state, points_transition, points_observation) = (
augmented_points([
state_moments,
transition_noise_moments,
observation_noise_moments
])
)
return (points_state, points_transition, points_observation)
def unscented_filter_predict(transition_function, points_state,
points_transition=None,
sigma_transition=None):
"""Predict next state distribution
Using the sigma points representing the state at time t given observations
from time steps 0...t, calculate the predicted mean, covariance, and sigma
points for the state at time t+1.
Parameters
----------
transition_function : function
function describing how the state changes between times t and t+1
points_state : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the state at time step t given
observations from time steps 0...t
points_transition : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the noise in transitioning from time step
t to t+1, if available. If not, assumes that noise is additive
sigma_transition : [n_dim_state, n_dim_state] array
covariance corresponding to additive noise in transitioning from time
step t to t+1, if available. If not, assumes noise is not additive.
Returns
-------
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to state at time step t+1 given observations
from time steps 0...t. These points have not been "standardized" by the
unscented transform yet.
moments_pred : [n_dim_state] Moments
mean and covariance corresponding to time step t+1 given observations
from time steps 0...t
"""
assert points_transition is not None or sigma_transition is not None, \
"Your system is noiseless? really?"
(points_pred, moments_pred) = (
unscented_transform(
points_state, transition_function,
points_noise=points_transition, sigma_noise=sigma_transition
)
)
return (points_pred, moments_pred)
def unscented_filter_correct(observation_function, moments_pred,
points_pred, observation,
points_observation=None,
sigma_observation=None):
"""Integrate new observation to correct state estimates
Parameters
----------
observation_function : function
function characterizing how the observation at time t+1 is generated
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to moments_pred
observation : [n_dim_state] array
observation at time t+1. If masked, treated as missing.
points_observation : [2*n_dim_state, n_dim_obs] SigmaPoints
sigma points corresponding to predicted observation at time t+1 given
observations from times 0...t, if available. If not, noise is assumed
to be additive.
sigma_observation : [n_dim_obs, n_dim_obs] array
covariance matrix corresponding to additive noise in observation at
        time t+1, if available. If missing, noise is assumed to be non-additive.
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t+1
"""
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
(obs_points_pred, obs_moments_pred) = (
unscented_transform(
points_pred, observation_function,
points_noise=points_observation, sigma_noise=sigma_observation
)
)
# Calculate Cov(x_t, z_t | z_{0:t-1})
sigma_pair = (
((points_pred.points - moments_pred.mean).T)
.dot(np.diag(points_pred.weights_mean))
.dot(obs_points_pred.points - obs_moments_pred.mean)
)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
moments_filt = unscented_correct(sigma_pair, moments_pred, obs_moments_pred, observation)
return moments_filt
def augmented_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with arbitrary noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and the
        process noise and outputs the next state.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        Z[t] = observation at time t
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
    # TODO use _augmented_unscented_filter_update here
for t in range(T):
# Calculate sigma points for augmented state:
# [actual state, transition noise, observation noise]
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
# extract sigma points using augmented representation
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(mu, sigma, Q, R)
)
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1}) and sigma points
# for P(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
points_transition=points_transition
)
)
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], points_observation=points_observation
)
)
return (mu_filt, sigma_filt)
def augmented_unscented_smoother(mu_filt, sigma_filt, f, Q):
'''Apply the Unscented Kalman Smoother with arbitrary noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and the
        process noise and outputs the next state.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for [state, transition noise]
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
moments_transition_noise = Moments(np.zeros(n_dim_state), Q)
(points_state, points_transition) = (
augmented_points([moments_state, moments_transition_noise])
)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = unscented_transform(
points_state, f_t, points_noise=points_transition
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
def additive_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with additive noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and outputs
        the next.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        Z[t] = observation at time t
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
for t in range(T):
# Calculate sigma points for P(x_{t-1} | z_{0:t-1})
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
points_state = moments2points(Moments(mu, sigma))
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, sigma_transition=Q
)
)
points_pred = moments2points(moments_pred)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], sigma_observation=R
)
)
return (mu_filt, sigma_filt)
def additive_unscented_smoother(mu_filt, sigma_filt, f, Q):
    '''Apply the Unscented Kalman Smoother assuming additive noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and outputs
        the next.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for state
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
points_state = moments2points(moments_state)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_transform(points_state, f_t, sigma_noise=Q)
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
class UnscentedMixin(object):
"""Methods shared by all Unscented Kalman Filter implementations."""
def __init__(self, transition_functions=None, observation_functions=None,
transition_covariance=None, observation_covariance=None,
initial_state_mean=None, initial_state_covariance=None,
n_dim_state=None, n_dim_obs=None, random_state=None):
# determine size of state and observation space
n_dim_state = _determine_dimensionality(
[(transition_covariance, array2d, -2),
(initial_state_covariance, array2d, -2),
(initial_state_mean, array1d, -1)],
n_dim_state
)
n_dim_obs = _determine_dimensionality(
[(observation_covariance, array2d, -2)],
n_dim_obs
)
# set parameters
self.transition_functions = transition_functions
self.observation_functions = observation_functions
self.transition_covariance = transition_covariance
self.observation_covariance = observation_covariance
self.initial_state_mean = initial_state_mean
self.initial_state_covariance = initial_state_covariance
self.n_dim_state = n_dim_state
self.n_dim_obs = n_dim_obs
self.random_state = random_state
def _initialize_parameters(self):
"""Retrieve parameters if they exist, else replace with defaults"""
arguments = get_params(self)
defaults = self._default_parameters()
converters = self._converters()
processed = preprocess_arguments([arguments, defaults], converters)
return (
processed['transition_functions'],
processed['observation_functions'],
processed['transition_covariance'],
processed['observation_covariance'],
processed['initial_state_mean'],
processed['initial_state_covariance']
)
def _parse_observations(self, obs):
"""Safely convert observations to their expected format"""
obs = ma.atleast_2d(obs)
if obs.shape[0] == 1 and obs.shape[1] > 1:
obs = obs.T
return obs
def _converters(self):
return {
'transition_functions': array1d,
'observation_functions': array1d,
'transition_covariance': array2d,
'observation_covariance': array2d,
'initial_state_mean': array1d,
'initial_state_covariance': array2d,
'n_dim_state': int,
'n_dim_obs': int,
'random_state': check_random_state,
}
class UnscentedKalmanFilter(UnscentedMixin):
r'''Implements the General (aka Augmented) Unscented Kalman Filter governed
by the following equations,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t, \text{Normal}(0, Q)) \\
z_{t} &= g_t(x_t, \text{Normal}(0, R))
Notice that although the input noise to the state transition equation and
the observation equation are both normally distributed, any non-linear
transformation may be applied afterwards. This allows for greater
generality, but at the expense of computational complexity. The complexity
of :class:`UnscentedKalmanFilter.filter()` is :math:`O(T(2n+m)^3)`
where :math:`T` is the number of time steps, :math:`n` is the size of the
state space, and :math:`m` is the size of the observation space.
If your noise is simply additive, consider using the
:class:`AdditiveUnscentedKalmanFilter`
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state and the transition
noise at time t and produces the state at time t+1. Also known as
:math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state and the observation
noise at time t and produces the observation at time t. Also known as
:math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
        specify initial values for `transition_covariance`,
        `initial_state_mean`, or `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
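    # Minimal usage sketch (all values hypothetical):
    #   ukf = UnscentedKalmanFilter(
    #       transition_functions=lambda x, w: x + np.sin(x) + w,
    #       observation_functions=lambda x, v: x + v,
    #       transition_covariance=0.1 * np.eye(1),
    #       observation_covariance=np.eye(1),
    #       initial_state_mean=np.zeros(1),
    #       initial_state_covariance=np.eye(1))
    #   (filtered_means, filtered_covs) = ukf.filter(observations)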
def sample(self, n_timesteps, initial_state=None, random_state=None):
'''Sample from model defined by the Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
random_state : optional, int or Random
random number generator
'''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = rng.multivariate_normal(
initial_state_mean, initial_state_covariance
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1], transition_noise)
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t], observation_noise)
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
augmented_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
            [0...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
            times [0...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
            [0...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
            from times [0...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(
filtered_state_mean, filtered_state_covariance,
transition_covariance, observation_covariance
)
)
# predict
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, points_transition
)
)
# correct
next_filtered_state_mean, next_filtered_state_covariance = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, points_observation=points_observation
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
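    # Illustrative streaming pattern (added comment, not part of the original
    # module): `filter_update` can be chained over incoming observations,
    # e.g.
    #
    #     mean = ukf.initial_state_mean
    #     cov = ukf.initial_state_covariance
    #     for z in observation_stream:
    #         mean, cov = ukf.filter_update(mean, cov, observation=z)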
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
        smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t
            given observations from times [0, n_timesteps-1]
        smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution
            at time t given observations from times [0, n_timesteps-1]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
augmented_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state, noise: state + noise,
'observation_functions': lambda state, noise: state + noise,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
class AdditiveUnscentedKalmanFilter(UnscentedMixin):
r'''Implements the Unscented Kalman Filter with additive noise.
Observations are assumed to be generated from the following process,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t) + \text{Normal}(0, Q) \\
z_{t} &= g_t(x_t) + \text{Normal}(0, R)
    While less general than the general-noise Unscented Kalman Filter, the
    Additive version is more computationally efficient, with complexity
    :math:`O(Tn^3)` where :math:`T` is the number of time steps and :math:`n`
    is the size of the state space.
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state at time t and
produces the state at time t+1. Also known as :math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state at time t and
produces the observation at time t. Also known as :math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`.
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`.
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
specify initial values for `transition_covariance`, or
`initial_state_mean`, `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
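
    Examples
    --------
    A minimal usage sketch (added for illustration; the functions below are
    invented, not part of the library)::

        >>> import numpy as np
        >>> akf = AdditiveUnscentedKalmanFilter(
        ...     transition_functions=lambda x: np.sin(x),
        ...     observation_functions=lambda x: x,
        ...     transition_covariance=0.1 * np.eye(1),
        ...     observation_covariance=0.5 * np.eye(1),
        ...     initial_state_mean=np.zeros(1),
        ...     initial_state_covariance=np.eye(1),
        ...     random_state=0)
        >>> states, observations = akf.sample(20)
        >>> smoothed_means, smoothed_covs = akf.smooth(observations)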
'''
def sample(self, n_timesteps, initial_state=None, random_state=None):
        '''Sample from the model defined by the Additive Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
        random_state : optional, int or RandomState
            random number generator
        '''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = (
rng.multivariate_normal(
initial_state_mean,
initial_state_covariance
)
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1]) + transition_noise
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t]) + observation_noise
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
additive_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
            [0...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
            times [0...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
            [0...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
            from times [0...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
moments_state = Moments(filtered_state_mean, filtered_state_covariance)
points_state = moments2points(moments_state)
# predict
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
sigma_transition=transition_covariance
)
)
points_pred = moments2points(moments_pred)
# correct
(next_filtered_state_mean, next_filtered_state_covariance) = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, sigma_observation=observation_covariance
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
        smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t
            given observations from times [0, n_timesteps-1]
        smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution
            at time t given observations from times [0, n_timesteps-1]
'''
        Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
additive_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state: state,
'observation_functions': lambda state: state,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
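

# Self-contained demo (added for illustration; not part of the original
# module). On a purely additive model the general and additive filters
# should produce nearly identical state estimates.
if __name__ == '__main__':
    _Q = 0.1 * np.eye(1)
    _R = 0.5 * np.eye(1)
    _ukf = UnscentedKalmanFilter(
        transition_functions=lambda x, w: np.sin(x) + w,
        observation_functions=lambda x, v: x + v,
        transition_covariance=_Q, observation_covariance=_R,
        initial_state_mean=np.zeros(1), initial_state_covariance=np.eye(1),
        random_state=0)
    _akf = AdditiveUnscentedKalmanFilter(
        transition_functions=lambda x: np.sin(x),
        observation_functions=lambda x: x,
        transition_covariance=_Q, observation_covariance=_R,
        initial_state_mean=np.zeros(1), initial_state_covariance=np.eye(1),
        random_state=0)
    _, _z = _ukf.sample(30)
    _m_general, _ = _ukf.filter(_z)
    _m_additive, _ = _akf.filter(_z)
    print('max |general - additive| filtered mean: %g'
          % np.abs(_m_general - _m_additive).max())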
|
PierrotLC/pykalman
|
pykalman/unscented.py
|
Python
|
bsd-3-clause
| 51,819
|
[
"Gaussian"
] |
fb77e2f377674defb77771246ef174c386c64382df3fd87b5459aec6d9266e85
|
#!/usr/bin/env python
########################################
# Global map for tests
# by Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import os
import netCDF4
import pylab as pl
import numpy as np
import matplotlib as mpl
from netCDF4 import num2date
import Scientific.IO.NetCDF as IO
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.dates as mdates
from matplotlib.dates import YearLocator,MonthLocator,DateFormatter
pl.close('all')
########################## OBS location:
# all the plot files should be here:
OBSDIR='/Users/ctang/climate/GLOBALDATA/OBSDATA/Meteo-France/39site'
ProjOBStxt=['monthly mean of spatial mean',\
'yearly mean']
OBStxtfile=['monthly.meteofrance.missing.removed',\
'yearly.meteofrance.missing.removed']
# to prepare the plot file
#os.environ['OBStxtfile']=str(OBStxtfile[0])
#os.system(" awk 'NR>1{print $2,$6*(10000/3600/24/30)}' $OBStxtfile > time.wm2.temp")
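# A pure-Python sketch of the commented-out awk preprocessing above (added
# for illustration; the column indices and unit conversion simply mirror the
# awk command and are not verified here):
#
#   with open(OBSDIR + '/' + OBStxtfile[0]) as fin, \
#        open('time.wm2.temp', 'w') as fout:
#       for line in list(fin)[1:]:                     # awk 'NR>1'
#           parts = line.split()
#           fout.write('%s %s\n' % (parts[1],
#                      float(parts[5]) * 10000.0 / 3600 / 24 / 30))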
COLOR=['black','dodgerblue','deeppink','darkgreen',\
'brown','chocolate',\
'green','yellowgreen','aqua','olive','teal',\
'blue','purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
linestyles=['_', '_', '_', '_', '_',\
'--','--','--', '--',\
'_', '-', '--', ':']
#================================================ CMIP5 models
print "==============================================="
#=================================================== define the Plot:
fig,ax = plt.subplots(figsize=(16,9))
plt.xlabel('Year',fontsize=16)
plt.ylabel('Surface Downwelling Solar Radiation ( W/m2 )',fontsize=16)
plt.title("MeteoFrance SSR over Reunion Island",fontsize=18)
# vertical range ylim yrange
plt.ylim(0,275)
plt.yticks(np.arange(0,275, 20))
#plt.xlim(1948,2016)
plt.grid()
ax.xaxis.set_major_locator(MonthLocator(1)) # tick each January (bymonth=1)
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m'))
ax.fmt_xdata = DateFormatter('%Y')
#plt.xticks(np.arange(1950, 2016, 5))
#plt.tick_params(axis='both', which='major', labelsize=14)
#plt.tick_params(axis='both', which='minor', labelsize=14)
#=================================================== 3 windows
#plt.axvspan(1950, 1980, alpha=0.2, color='teal')
#=================================================== draw lines
#plt.axvline(x=2005,linewidth=2, color='gray')
plt.axhline(y=39,linewidth=2, color='gray')
##### to plot the text file:
for obs in ProjOBStxt:
infile1=OBSDIR+'/'+OBStxtfile[ProjOBStxt.index(obs)]
print('the file is == ' +infile1)
TIME,SSR,Histo= np.loadtxt(infile1, unpack=True,\
converters={ 0: mdates.strpdate2num('%Y-%m')})
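    # Note (added): mdates.strpdate2num is deprecated in newer matplotlib; an
    # equivalent converter, assuming the same '%Y-%m' format, would be
    #   lambda s: mdates.date2num(datetime.strptime(s, '%Y-%m'))
    # (with `from datetime import datetime` at the top of the script).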
print " plotting text file ================== "
print TIME
print SSR
plt.plot(TIME,SSR,\
'o-',\
label=obs,\
color=COLOR[ProjOBStxt.index(obs)+1],\
linewidth=2)
##### to plot the number of data
if(ProjOBStxt.index(obs)<1):
plt.bar(TIME,Histo,20,color="green",alpha=0.4)
plt.text(TIME[4],65,'number of records per month (of 39 site Reunion)',\
size=16,rotation=0.,
ha="center",va="center",
bbox = dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
))
fig.autofmt_xdate()
plt.legend(loc=2)
plt.show()
quit()
|
CopyChat/Plotting
|
Python/ssr_summary.meteofrance.py
|
Python
|
gpl-3.0
| 3,351
|
[
"NetCDF"
] |
581c0d70325067ba26a6508ba5d0000c073707caa607965fedcbbccf91e2ecf9
|
from sympy import (meijerg, I, S, integrate, Integral, oo, gamma, cosh, sinc,
hyperexpand, exp, simplify, sqrt, pi, erf, erfc, sin, cos,
exp_polar, polygamma, hyper, log, expand_func)
from sympy.integrals.meijerint import (_rewrite_single, _rewrite1,
meijerint_indefinite, _inflate_g, _create_lookup_table,
meijerint_definite, meijerint_inversion)
from sympy.utilities import default_sort_key
from sympy.utilities.pytest import slow
from sympy.utilities.randtest import (verify_numerically,
random_complex_number as randcplx)
from sympy.core.compatibility import range
from sympy.abc import x, y, a, b, c, d, s, t, z
def test_rewrite_single():
def t(expr, c, m):
e = _rewrite_single(meijerg([a], [b], [c], [d], expr), x)
assert e is not None
assert isinstance(e[0][0][2], meijerg)
assert e[0][0][2].argument.as_coeff_mul(x) == (c, (m,))
def tn(expr):
assert _rewrite_single(meijerg([a], [b], [c], [d], expr), x) is None
t(x, 1, x)
t(x**2, 1, x**2)
t(x**2 + y*x**2, y + 1, x**2)
tn(x**2 + x)
tn(x**y)
def u(expr, x):
from sympy import Add, exp, exp_polar
r = _rewrite_single(expr, x)
e = Add(*[res[0]*res[2] for res in r[0]]).replace(
exp_polar, exp) # XXX Hack?
assert verify_numerically(e, expr, x)
u(exp(-x)*sin(x), x)
# The following has stopped working because hyperexpand changed slightly.
# It is probably not worth fixing
#u(exp(-x)*sin(x)*cos(x), x)
# This one cannot be done numerically, since it comes out as a g-function
# of argument 4*pi
# NOTE This also tests a bug in inverse mellin transform (which used to
# turn exp(4*pi*I*t) into a factor of exp(4*pi*I)**t instead of
# exp_polar).
#u(exp(x)*sin(x), x)
assert _rewrite_single(exp(x)*sin(x), x) == \
([(-sqrt(2)/(2*sqrt(pi)), 0,
meijerg(((-S(1)/2, 0, S(1)/4, S(1)/2, S(3)/4), (1,)),
((), (-S(1)/2, 0)), 64*exp_polar(-4*I*pi)/x**4))], True)
def test_rewrite1():
assert _rewrite1(x**3*meijerg([a], [b], [c], [d], x**2 + y*x**2)*5, x) == \
(5, x**3, [(1, 0, meijerg([a], [b], [c], [d], x**2*(y + 1)))], True)
def test_meijerint_indefinite_numerically():
def t(fac, arg):
g = meijerg([a], [b], [c], [d], arg)*fac
subs = {a: randcplx()/10, b: randcplx()/10 + I,
c: randcplx(), d: randcplx()}
integral = meijerint_indefinite(g, x)
assert integral is not None
assert verify_numerically(g.subs(subs), integral.diff(x).subs(subs), x)
t(1, x)
t(2, x)
t(1, 2*x)
t(1, x**2)
t(5, x**S('3/2'))
t(x**3, x)
t(3*x**S('3/2'), 4*x**S('7/3'))
def test_meijerint_definite():
v, b = meijerint_definite(x, x, 0, 0)
assert v.is_zero and b is True
v, b = meijerint_definite(x, x, oo, oo)
assert v.is_zero and b is True
def test_inflate():
subs = {a: randcplx()/10, b: randcplx()/10 + I, c: randcplx(),
d: randcplx(), y: randcplx()/10}
def t(a, b, arg, n):
from sympy import Mul
m1 = meijerg(a, b, arg)
m2 = Mul(*_inflate_g(m1, n))
# NOTE: (the random number)**9 must still be on the principal sheet.
# Thus make b&d small to create random numbers of small imaginary part.
return verify_numerically(m1.subs(subs), m2.subs(subs), x, b=0.1, d=-0.1)
assert t([[a], [b]], [[c], [d]], x, 3)
assert t([[a, y], [b]], [[c], [d]], x, 3)
assert t([[a], [b]], [[c, y], [d]], 2*x**3, 3)
def test_recursive():
from sympy import symbols
a, b, c = symbols('a b c', positive=True)
r = exp(-(x - a)**2)*exp(-(x - b)**2)
e = integrate(r, (x, 0, oo), meijerg=True)
assert simplify(e.expand()) == (
sqrt(2)*sqrt(pi)*(
(erf(sqrt(2)*(a + b)/2) + 1)*exp(-a**2/2 + a*b - b**2/2))/4)
e = integrate(exp(-(x - a)**2)*exp(-(x - b)**2)*exp(c*x), (x, 0, oo), meijerg=True)
assert simplify(e) == (
sqrt(2)*sqrt(pi)*(erf(sqrt(2)*(2*a + 2*b + c)/4) + 1)*exp(-a**2 - b**2
+ (2*a + 2*b + c)**2/8)/4)
assert simplify(integrate(exp(-(x - a - b - c)**2), (x, 0, oo), meijerg=True)) == \
sqrt(pi)/2*(1 + erf(a + b + c))
assert simplify(integrate(exp(-(x + a + b + c)**2), (x, 0, oo), meijerg=True)) == \
sqrt(pi)/2*(1 - erf(a + b + c))
@slow
def test_meijerint():
from sympy import symbols, expand, arg
s, t, mu = symbols('s t mu', real=True)
assert integrate(meijerg([], [], [0], [], s*t)
*meijerg([], [], [mu/2], [-mu/2], t**2/4),
(t, 0, oo)).is_Piecewise
s = symbols('s', positive=True)
assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo)) == \
gamma(s + 1)
assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo),
meijerg=True) == gamma(s + 1)
assert isinstance(integrate(x**s*meijerg([[], []], [[0], []], x),
(x, 0, oo), meijerg=False),
Integral)
assert meijerint_indefinite(exp(x), x) == exp(x)
# TODO what simplifications should be done automatically?
# This tests "extra case" for antecedents_1.
a, b = symbols('a b', positive=True)
assert simplify(meijerint_definite(x**a, x, 0, b)[0]) == \
b**(a + 1)/(a + 1)
# This tests various conditions and expansions:
    # (the integral of (x + 1)**3*exp(-x) over [0, oo) is 3! + 3*2! + 3*1! + 1 = 16)
    assert meijerint_definite((x + 1)**3*exp(-x), x, 0, oo) == (16, True)
# Again, how about simplifications?
sigma, mu = symbols('sigma mu', positive=True)
i, c = meijerint_definite(exp(-((x - mu)/(2*sigma))**2), x, 0, oo)
assert simplify(i) == sqrt(pi)*sigma*(2 - erfc(mu/(2*sigma)))
assert c == True
i, _ = meijerint_definite(exp(-mu*x)*exp(sigma*x), x, 0, oo)
# TODO it would be nice to test the condition
assert simplify(i) == 1/(mu - sigma)
# Test substitutions to change limits
assert meijerint_definite(exp(x), x, -oo, 2) == (exp(2), True)
# Note: causes a NaN in _check_antecedents
assert expand(meijerint_definite(exp(x), x, 0, I)[0]) == exp(I) - 1
assert expand(meijerint_definite(exp(-x), x, 0, x)[0]) == \
1 - exp(-exp(I*arg(x))*abs(x))
# Test -oo to oo
assert meijerint_definite(exp(-x**2), x, -oo, oo) == (sqrt(pi), True)
assert meijerint_definite(exp(-abs(x)), x, -oo, oo) == (2, True)
assert meijerint_definite(exp(-(2*x - 3)**2), x, -oo, oo) == \
(sqrt(pi)/2, True)
assert meijerint_definite(exp(-abs(2*x - 3)), x, -oo, oo) == (1, True)
assert meijerint_definite(exp(-((x - mu)/sigma)**2/2)/sqrt(2*pi*sigma**2),
x, -oo, oo) == (1, True)
assert meijerint_definite(sinc(x)**2, x, -oo, oo) == (pi, True)
    # Test one of the extra conditions for 2 g-functions
assert meijerint_definite(exp(-x)*sin(x), x, 0, oo) == (S(1)/2, True)
# Test a bug
def res(n):
return (1/(1 + x**2)).diff(x, n).subs(x, 1)*(-1)**n
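    # (added note) res(n) encodes the Laplace-transform identity behind the
    # test: integrate(x**n*sin(x)*exp(-s*x), (x, 0, oo)) equals
    # (-1)**n * d^n/ds^n [1/(1 + s**2)], evaluated here at s = 1.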
for n in range(6):
assert integrate(exp(-x)*sin(x)*x**n, (x, 0, oo), meijerg=True) == \
res(n)
# This used to test trigexpand... now it is done by linear substitution
assert simplify(integrate(exp(-x)*sin(x + a), (x, 0, oo), meijerg=True)
) == sqrt(2)*sin(a + pi/4)/2
# Test the condition 14 from prudnikov.
# (This is besselj*besselj in disguise, to stop the product from being
# recognised in the tables.)
a, b, s = symbols('a b s')
from sympy import And, re
assert meijerint_definite(meijerg([], [], [a/2], [-a/2], x/4)
*meijerg([], [], [b/2], [-b/2], x/4)*x**(s - 1), x, 0, oo) == \
(4*2**(2*s - 2)*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/(gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
And(0 < -2*re(4*s) + 8, 0 < re(a/2 + b/2 + s), re(2*s) < 1))
# test a bug
assert integrate(sin(x**a)*sin(x**b), (x, 0, oo), meijerg=True) == \
Integral(sin(x**a)*sin(x**b), (x, 0, oo))
# test better hyperexpand
assert integrate(exp(-x**2)*log(x), (x, 0, oo), meijerg=True) == \
(sqrt(pi)*polygamma(0, S(1)/2)/4).expand()
# Test hyperexpand bug.
from sympy import lowergamma
n = symbols('n', integer=True)
assert simplify(integrate(exp(-x)*x**n, x, meijerg=True)) == \
lowergamma(n + 1, x)
# Test a bug with argument 1/x
alpha = symbols('alpha', positive=True)
assert meijerint_definite((2 - x)**alpha*sin(alpha/x), x, 0, 2) == \
(sqrt(pi)*alpha*gamma(alpha + 1)*meijerg(((), (alpha/2 + S(1)/2,
alpha/2 + 1)), ((0, 0, S(1)/2), (-S(1)/2,)), alpha**S(2)/16)/4, True)
# test a bug related to 3016
a, s = symbols('a s', positive=True)
assert simplify(integrate(x**s*exp(-a*x**2), (x, -oo, oo))) == \
a**(-s/2 - S(1)/2)*((-1)**s + 1)*gamma(s/2 + S(1)/2)/2
def test_bessel():
from sympy import besselj, besseli
assert simplify(integrate(besselj(a, z)*besselj(b, z)/z, (z, 0, oo),
meijerg=True, conds='none')) == \
2*sin(pi*(a/2 - b/2))/(pi*(a - b)*(a + b))
assert simplify(integrate(besselj(a, z)*besselj(a, z)/z, (z, 0, oo),
meijerg=True, conds='none')) == 1/(2*a)
# TODO more orthogonality integrals
assert simplify(integrate(sin(z*x)*(x**2 - 1)**(-(y + S(1)/2)),
(x, 1, oo), meijerg=True, conds='none')
*2/((z/2)**y*sqrt(pi)*gamma(S(1)/2 - y))) == \
besselj(y, z)
# Werner Rosenheinrich
# SOME INDEFINITE INTEGRALS OF BESSEL FUNCTIONS
assert integrate(x*besselj(0, x), x, meijerg=True) == x*besselj(1, x)
assert integrate(x*besseli(0, x), x, meijerg=True) == x*besseli(1, x)
# TODO can do higher powers, but come out as high order ... should they be
# reduced to order 0, 1?
assert integrate(besselj(1, x), x, meijerg=True) == -besselj(0, x)
assert integrate(besselj(1, x)**2/x, x, meijerg=True) == \
-(besselj(0, x)**2 + besselj(1, x)**2)/2
# TODO more besseli when tables are extended or recursive mellin works
assert integrate(besselj(0, x)**2/x**2, x, meijerg=True) == \
-2*x*besselj(0, x)**2 - 2*x*besselj(1, x)**2 \
+ 2*besselj(0, x)*besselj(1, x) - besselj(0, x)**2/x
assert integrate(besselj(0, x)*besselj(1, x), x, meijerg=True) == \
-besselj(0, x)**2/2
assert integrate(x**2*besselj(0, x)*besselj(1, x), x, meijerg=True) == \
x**2*besselj(1, x)**2/2
assert integrate(besselj(0, x)*besselj(1, x)/x, x, meijerg=True) == \
(x*besselj(0, x)**2 + x*besselj(1, x)**2 -
besselj(0, x)*besselj(1, x))
# TODO how does besselj(0, a*x)*besselj(0, b*x) work?
# TODO how does besselj(0, x)**2*besselj(1, x)**2 work?
# TODO sin(x)*besselj(0, x) etc come out a mess
# TODO can x*log(x)*besselj(0, x) be done?
# TODO how does besselj(1, x)*besselj(0, x+a) work?
# TODO more indefinite integrals when struve functions etc are implemented
# test a substitution
assert integrate(besselj(1, x**2)*x, x, meijerg=True) == \
-besselj(0, x**2)/2
def test_inversion():
from sympy import piecewise_fold, besselj, sqrt, sin, cos, Heaviside
def inv(f):
return piecewise_fold(meijerint_inversion(f, s, t))
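    # (added note) As a sanity check, inv(1/s) should give Heaviside(t), the
    # classical inverse Laplace transform of 1/s.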
assert inv(1/(s**2 + 1)) == sin(t)*Heaviside(t)
assert inv(s/(s**2 + 1)) == cos(t)*Heaviside(t)
assert inv(exp(-s)/s) == Heaviside(t - 1)
assert inv(1/sqrt(1 + s**2)) == besselj(0, t)*Heaviside(t)
    # Test some antecedents checking.
assert meijerint_inversion(sqrt(s)/sqrt(1 + s**2), s, t) is None
assert inv(exp(s**2)) is None
assert meijerint_inversion(exp(-s**2), s, t) is None
@slow
def test_lookup_table():
from random import uniform, randrange
from sympy import Add
from sympy.integrals.meijerint import z as z_dummy
table = {}
_create_lookup_table(table)
for _, l in sorted(table.items()):
for formula, terms, cond, hint in sorted(l, key=default_sort_key):
subs = {}
for a in list(formula.free_symbols) + [z_dummy]:
if hasattr(a, 'properties') and a.properties:
# these Wilds match positive integers
subs[a] = randrange(1, 10)
else:
subs[a] = uniform(1.5, 2.0)
if not isinstance(terms, list):
terms = terms(subs)
# First test that hyperexpand can do this.
expanded = [hyperexpand(g) for (_, g) in terms]
assert all(x.is_Piecewise or not x.has(meijerg) for x in expanded)
# Now test that the meijer g-function is indeed as advertised.
expanded = Add(*[f*x for (f, x) in terms])
a, b = formula.n(subs=subs), expanded.n(subs=subs)
r = min(abs(a), abs(b))
if r < 1:
assert abs(a - b).n() <= 1e-10
else:
assert (abs(a - b)/r).n() <= 1e-10
def test_branch_bug():
from sympy import powdenest, lowergamma
# TODO combsimp cannot prove that the factor is unity
assert powdenest(integrate(erf(x**3), x, meijerg=True).diff(x),
polar=True) == 2*erf(x**3)*gamma(S(2)/3)/3/gamma(S(5)/3)
assert integrate(erf(x**3), x, meijerg=True) == \
2*x*erf(x**3)*gamma(S(2)/3)/(3*gamma(S(5)/3)) \
- 2*gamma(S(2)/3)*lowergamma(S(2)/3, x**6)/(3*sqrt(pi)*gamma(S(5)/3))
def test_linear_subs():
from sympy import besselj
assert integrate(sin(x - 1), x, meijerg=True) == -cos(1 - x)
assert integrate(besselj(1, x - 1), x, meijerg=True) == -besselj(0, 1 - x)
@slow
def test_probability():
# various integrals from probability theory
from sympy.abc import x, y
from sympy import symbols, Symbol, Abs, expand_mul, combsimp, powsimp, sin
mu1, mu2 = symbols('mu1 mu2', real=True, nonzero=True, finite=True)
sigma1, sigma2 = symbols('sigma1 sigma2', real=True, nonzero=True,
finite=True, positive=True)
rate = Symbol('lambda', real=True, positive=True, finite=True)
def normal(x, mu, sigma):
return 1/sqrt(2*pi*sigma**2)*exp(-(x - mu)**2/2/sigma**2)
def exponential(x, rate):
return rate*exp(-rate*x)
assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \
mu1
assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**2 + sigma1**2
assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**3 + 3*mu1*sigma1**2
assert integrate(normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1
assert integrate(y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2
assert integrate(x*y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1*mu2
assert integrate((x + y + 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2
assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
-1 + mu1 + mu2
i = integrate(x**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True)
assert not i.has(Abs)
assert simplify(i) == mu1**2 + sigma1**2
assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
sigma2**2 + mu2**2
assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1
assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \
1/rate
assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) == \
2/rate**2
def E(expr):
res1 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(x, 0, oo), (y, -oo, oo), meijerg=True)
res2 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(y, -oo, oo), (x, 0, oo), meijerg=True)
assert expand_mul(res1) == expand_mul(res2)
return res1
assert E(1) == 1
assert E(x*y) == mu1/rate
assert E(x*y**2) == mu1**2/rate + sigma1**2/rate
ans = sigma1**2 + 1/rate**2
assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans
assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans
assert simplify(E((x + y)**2) - E(x + y)**2) == ans
# Beta' distribution
alpha, beta = symbols('alpha beta', positive=True)
betadist = x**(alpha - 1)*(1 + x)**(-alpha - beta)*gamma(alpha + beta) \
/gamma(alpha)/gamma(beta)
assert integrate(betadist, (x, 0, oo), meijerg=True) == 1
i = integrate(x*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert (combsimp(i[0]), i[1]) == (alpha/(beta - 1), 1 < beta)
j = integrate(x**2*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert j[1] == (1 < beta - 1)
assert combsimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \
/(beta - 2)/(beta - 1)**2
# Beta distribution
# NOTE: this is evaluated using antiderivatives. It also tests that
# meijerint_indefinite returns the simplest possible answer.
a, b = symbols('a b', positive=True)
betadist = x**(a - 1)*(-x + 1)**(b - 1)*gamma(a + b)/(gamma(a)*gamma(b))
assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1
assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \
a/(a + b)
assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \
a*(a + 1)/(a + b)/(a + b + 1)
assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \
gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y)
# Chi distribution
k = Symbol('k', integer=True, positive=True)
chi = 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \
sqrt(2)*gamma((k + 1)/2)/gamma(k/2)
assert simplify(integrate(x**2*chi, (x, 0, oo), meijerg=True)) == k
# Chi^2 distribution
chisquared = 2**(-k/2)/gamma(k/2)*x**(k/2 - 1)*exp(-x/2)
assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chisquared, (x, 0, oo), meijerg=True)) == k
assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \
k*(k + 2)
assert combsimp(integrate(((x - k)/sqrt(2*k))**3*chisquared, (x, 0, oo),
meijerg=True)) == 2*sqrt(2)/sqrt(k)
# Dagum distribution
a, b, p = symbols('a b p', positive=True)
# XXX (x/b)**a does not work
dagum = a*p/x*(x/b)**(a*p)/(1 + x**a/b**a)**(p + 1)
assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1
# XXX conditions are a mess
arg = x*dagum
assert simplify(integrate(arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b*gamma(1 - 1/a)*gamma(p + 1 + 1/a)/(
(a*p + 1)*gamma(p))
assert simplify(integrate(x*arg, (x, 0, oo), meijerg=True, conds='none')
) == a*b**2*gamma(1 - 2/a)*gamma(p + 1 + 2/a)/(
(a*p + 2)*gamma(p))
# F-distribution
d1, d2 = symbols('d1 d2', positive=True)
f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1 + d2))/x \
/gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2)
assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1
# TODO conditions are a mess
assert simplify(integrate(x*f, (x, 0, oo), meijerg=True, conds='none')
) == d2/(d2 - 2)
assert simplify(integrate(x**2*f, (x, 0, oo), meijerg=True, conds='none')
) == d2**2*(d1 + 2)/d1/(d2 - 4)/(d2 - 2)
# TODO gamma, rayleigh
# inverse gaussian
lamda, mu = symbols('lamda mu', positive=True)
dist = sqrt(lamda/2/pi)*x**(-S(3)/2)*exp(-lamda*(x - mu)**2/x/2/mu**2)
mysimp = lambda expr: simplify(expr.rewrite(exp))
assert mysimp(integrate(dist, (x, 0, oo))) == 1
assert mysimp(integrate(x*dist, (x, 0, oo))) == mu
assert mysimp(integrate((x - mu)**2*dist, (x, 0, oo))) == mu**3/lamda
assert mysimp(integrate((x - mu)**3*dist, (x, 0, oo))) == 3*mu**5/lamda**2
# Levi
c = Symbol('c', positive=True)
assert integrate(sqrt(c/2/pi)*exp(-c/2/(x - mu))/(x - mu)**S('3/2'),
(x, mu, oo)) == 1
    # higher moments are infinite (oo)
# log-logistic
distn = (beta/alpha)*x**(beta - 1)/alpha**(beta - 1)/ \
(1 + x**beta/alpha**beta)**2
assert simplify(integrate(distn, (x, 0, oo))) == 1
# NOTE the conditions are a mess, but correctly state beta > 1
assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \
pi*alpha/beta/sin(pi/beta)
# (similar comment for conditions applies)
assert simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \
pi*alpha**y*y/beta/sin(pi*y/beta)
# weibull
k = Symbol('k', positive=True)
n = Symbol('n', positive=True)
distn = k/lamda*(x/lamda)**(k - 1)*exp(-(x/lamda)**k)
assert simplify(integrate(distn, (x, 0, oo))) == 1
assert simplify(integrate(x**n*distn, (x, 0, oo))) == \
lamda**n*gamma(1 + n/k)
# rice distribution
from sympy import besseli
nu, sigma = symbols('nu sigma', positive=True)
rice = x/sigma**2*exp(-(x**2 + nu**2)/2/sigma**2)*besseli(0, x*nu/sigma**2)
assert integrate(rice, (x, 0, oo), meijerg=True) == 1
# can someone verify higher moments?
# Laplace distribution
mu = Symbol('mu', real=True)
b = Symbol('b', positive=True)
laplace = exp(-abs(x - mu)/b)/2/b
assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1
assert integrate(x*laplace, (x, -oo, oo), meijerg=True) == mu
assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \
2*b**2 + mu**2
# TODO are there other distributions supported on (-oo, oo) that we can do?
# misc tests
k = Symbol('k', positive=True)
assert combsimp(expand_mul(integrate(log(x)*x**(k - 1)*exp(-x)/gamma(k),
(x, 0, oo)))) == polygamma(0, k)
def test_expint():
""" Test various exponential integrals. """
from sympy import (expint, unpolarify, Symbol, Ci, Si, Shi, Chi,
sin, cos, sinh, cosh, Ei)
assert simplify(unpolarify(integrate(exp(-z*x)/x**y, (x, 1, oo),
meijerg=True, conds='none'
).rewrite(expint).expand(func=True))) == expint(y, z)
assert integrate(exp(-z*x)/x, (x, 1, oo), meijerg=True,
conds='none').rewrite(expint).expand() == \
expint(1, z)
assert integrate(exp(-z*x)/x**2, (x, 1, oo), meijerg=True,
conds='none').rewrite(expint).expand() == \
expint(2, z).rewrite(Ei).rewrite(expint)
assert integrate(exp(-z*x)/x**3, (x, 1, oo), meijerg=True,
conds='none').rewrite(expint).expand() == \
expint(3, z).rewrite(Ei).rewrite(expint).expand()
t = Symbol('t', positive=True)
assert integrate(-cos(x)/x, (x, t, oo), meijerg=True).expand() == Ci(t)
assert integrate(-sin(x)/x, (x, t, oo), meijerg=True).expand() == \
Si(t) - pi/2
assert integrate(sin(x)/x, (x, 0, z), meijerg=True) == Si(z)
assert integrate(sinh(x)/x, (x, 0, z), meijerg=True) == Shi(z)
assert integrate(exp(-x)/x, x, meijerg=True).expand().rewrite(expint) == \
I*pi - expint(1, x)
assert integrate(exp(-x)/x**2, x, meijerg=True).rewrite(expint).expand() \
== expint(1, x) - exp(-x)/x - I*pi
u = Symbol('u', polar=True)
assert integrate(cos(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
== Ci(u)
assert integrate(cosh(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
== Chi(u)
assert integrate(expint(1, x), x, meijerg=True
).rewrite(expint).expand() == x*expint(1, x) - exp(-x)
assert integrate(expint(2, x), x, meijerg=True
).rewrite(expint).expand() == \
-x**2*expint(1, x)/2 + x*exp(-x)/2 - exp(-x)/2
assert simplify(unpolarify(integrate(expint(y, x), x,
meijerg=True).rewrite(expint).expand(func=True))) == \
-expint(y + 1, x)
assert integrate(Si(x), x, meijerg=True) == x*Si(x) + cos(x)
assert integrate(Ci(u), u, meijerg=True).expand() == u*Ci(u) - sin(u)
assert integrate(Shi(x), x, meijerg=True) == x*Shi(x) - cosh(x)
assert integrate(Chi(u), u, meijerg=True).expand() == u*Chi(u) - sinh(u)
assert integrate(Si(x)*exp(-x), (x, 0, oo), meijerg=True) == pi/4
assert integrate(expint(1, x)*sin(x), (x, 0, oo), meijerg=True) == log(2)/2
def test_messy():
from sympy import (laplace_transform, Si, Shi, Chi, atan, Piecewise,
acoth, E1, besselj, acosh, asin, And, re,
fourier_transform, sqrt)
assert laplace_transform(Si(x), x, s) == ((-atan(s) + pi/2)/s, 0, True)
assert laplace_transform(Shi(x), x, s) == (acoth(s)/s, 1, True)
# where should the logs be simplified?
assert laplace_transform(Chi(x), x, s) == \
((log(s**(-2)) - log((s**2 - 1)/s**2))/(2*s), 1, True)
# TODO maybe simplify the inequalities?
assert laplace_transform(besselj(a, x), x, s)[1:] == \
(0, And(S(0) < re(a/2) + S(1)/2, S(0) < re(a/2) + 1))
# NOTE s < 0 can be done, but argument reduction is not good enough yet
assert fourier_transform(besselj(1, x)/x, x, s, noconds=False) == \
(Piecewise((0, 4*abs(pi**2*s**2) > 1),
(2*sqrt(-4*pi**2*s**2 + 1), True)), s > 0)
# TODO FT(besselj(0,x)) - conditions are messy (but for acceptable reasons)
# - folding could be better
assert integrate(E1(x)*besselj(0, x), (x, 0, oo), meijerg=True) == \
log(1 + sqrt(2))
assert integrate(E1(x)*besselj(1, x), (x, 0, oo), meijerg=True) == \
log(S(1)/2 + sqrt(2)/2)
assert integrate(1/x/sqrt(1 - x**2), x, meijerg=True) == \
Piecewise((-acosh(1/x), 1 < abs(x**(-2))), (I*asin(1/x), True))
def test_issue_6122():
assert integrate(exp(-I*x**2), (x, -oo, oo), meijerg=True) == \
-I*sqrt(pi)*exp(I*pi/4)
def test_issue_6252():
expr = 1/x/(a + b*x)**(S(1)/3)
anti = integrate(expr, x, meijerg=True)
    assert not anti.has(hyper)
# XXX the expression is a mess, but actually upon differentiation and
# putting in numerical values seems to work...
def test_issue_6348():
assert integrate(exp(I*x)/(1 + x**2), (x, -oo, oo)).simplify().rewrite(exp) \
== pi*exp(-1)
def test_fresnel():
from sympy import fresnels, fresnelc
assert expand_func(integrate(sin(pi*x**2/2), x)) == fresnels(x)
assert expand_func(integrate(cos(pi*x**2/2), x)) == fresnelc(x)
def test_issue_6860():
assert meijerint_indefinite(x**x**x, x) is None
def test_issue_7337():
f = meijerint_indefinite(x*sqrt(2*x + 3), x).together()
assert f == sqrt(2*x + 3)*(2*x**2 + x - 3)/5
assert f._eval_interval(x, S(-1), S(1)) == S(2)/5
def test_issue_8368():
assert meijerint_indefinite(cosh(x)*exp(-x*t), x) == (
(-t - 1)*exp(x) + (-t + 1)*exp(-x))*exp(-t*x)/2/(t**2 - 1)
def test_issue_10211():
from sympy.abc import h, w
assert integrate((1/sqrt(((y-x)**2 + h**2))**3), (x,0,w), (y,0,w)) == \
2*sqrt(1 + w**2/h**2)/h - 2/h
def test_issue_11806():
from sympy import symbols
y, L = symbols('y L', positive=True)
assert integrate(1/sqrt(x**2 + y**2)**3, (x, -L, L)) == \
2*L/(y**2*sqrt(L**2 + y**2))
def test_issue_10681():
from sympy import RR
from sympy.abc import R, r
f = integrate(r**2*(R**2-r**2)**0.5, r, meijerg=True)
g = (1.0/3)*R**1.0*r**3*hyper((-0.5, S(3)/2), (S(5)/2,),
r**2*exp_polar(2*I*pi)/R**2)
assert RR.almosteq((f/g).n(), 1.0, 1e-12)
|
NikNitro/Python-iBeacon-Scan
|
sympy/integrals/tests/test_meijerint.py
|
Python
|
gpl-3.0
| 28,412
|
[
"Gaussian"
] |
9bc663a17fbec2d7d126643c53f78314e37f1d01ab427b3544573b88d00e2b25
|
"""
Acceptance tests for studio related to the outline page.
"""
import json
from datetime import datetime, timedelta
import itertools
from pytz import UTC
from bok_choy.promise import EmptyPromise
from nose.plugins.attrib import attr
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState
from ...pages.studio.utils import add_discussion, drag, verify_ordering
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.config import ConfigModelFixture
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
from ..helpers import load_data_str
from ...pages.lms.progress import ProgressPage
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
"""
Base class for all course outline tests
"""
def setUp(self):
"""
Install a course with no content using a fixture.
"""
super(CourseOutlineTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self.advanced_settings = AdvancedSettingsPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
""" Install a course with sections/problems, tabs, updates, and handouts """
course_fixture.add_children(
XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('html', 'Test HTML Component'),
XBlockFixtureDesc('discussion', 'Test Discussion Component')
)
)
)
)
def do_action_and_verify(self, outline_page, action, expected_ordering):
"""
Perform the supplied action and then verify the resulting ordering.
"""
if outline_page is None:
outline_page = self.course_outline_page.visit()
action(outline_page)
verify_ordering(self, outline_page, expected_ordering)
# Reload the page and expand all subsections to see that the change was persisted.
outline_page = self.course_outline_page.visit()
outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click()
verify_ordering(self, outline_page, expected_ordering)
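    # (added note) Callers typically pass a lambda for `action` and a list of
    # parent-to-children mappings for `expected_ordering`, e.g.
    #   self.do_action_and_verify(None, lambda o: drag(o, 0, 1),
    #                             [{"1": ["1.2", "1.1"]}])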
@attr('shard_3')
class CourseOutlineDragAndDropTest(CourseOutlineTest):
"""
Tests of drag and drop within the outline page.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
"""
Create a course with one section, two subsections, and four units
"""
# with collapsed outline
self.chap_1_handle = 0
self.chap_1_seq_1_handle = 1
# with first sequential expanded
self.seq_1_vert_1_handle = 2
self.seq_1_vert_2_handle = 3
self.chap_1_seq_2_handle = 4
course_fixture.add_children(
XBlockFixtureDesc('chapter', "1").add_children(
XBlockFixtureDesc('sequential', '1.1').add_children(
XBlockFixtureDesc('vertical', '1.1.1'),
XBlockFixtureDesc('vertical', '1.1.2')
),
XBlockFixtureDesc('sequential', '1.2').add_children(
XBlockFixtureDesc('vertical', '1.2.1'),
XBlockFixtureDesc('vertical', '1.2.2')
)
)
)
def drag_and_verify(self, source, target, expected_ordering, outline_page=None):
self.do_action_and_verify(
outline_page,
            lambda outline: drag(outline, source, target),
expected_ordering
)
def test_drop_unit_in_collapsed_subsection(self):
"""
Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2" which already
have its own verticals.
"""
course_outline_page = self.course_outline_page.visit()
# expand first subsection
course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click()
expected_ordering = [{"1": ["1.1", "1.2"]},
{"1.1": ["1.1.1"]},
{"1.2": ["1.1.2", "1.2.1", "1.2.2"]}]
self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page)
@attr('shard_3')
class WarningMessagesTest(CourseOutlineTest):
"""
Feature: Warning messages on sections, subsections, and units
"""
__test__ = True
STAFF_ONLY_WARNING = 'Contains staff only content'
LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'
class PublishState(object):
"""
Default values for representing the published state of a unit
"""
NEVER_PUBLISHED = 1
UNPUBLISHED_CHANGES = 2
PUBLISHED = 3
VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]
class UnitState(object):
""" Represents the state of a unit """
def __init__(self, is_released, publish_state, is_locked):
""" Creates a new UnitState with the given properties """
self.is_released = is_released
self.publish_state = publish_state
self.is_locked = is_locked
@property
def name(self):
""" Returns an appropriate name based on the properties of the unit """
result = "Released " if self.is_released else "Unreleased "
if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
result += "Never Published "
elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
result += "Unpublished Changes "
else:
result += "Published "
result += "Locked" if self.is_locked else "Unlocked"
return result
def populate_course_fixture(self, course_fixture):
""" Install a course with various configurations that could produce warning messages """
# Define the dimensions that map to the UnitState constructor
features = [
[True, False], # Possible values for is_released
self.PublishState.VALUES, # Possible values for publish_state
[True, False] # Possible values for is_locked
]
# Add a fixture for every state in the product of features
course_fixture.add_children(*[
self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
])
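        # (added note) itertools.product over the three feature dimensions
        # yields 2 * 3 * 2 = 12 distinct unit states, one fixture per state.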
def _build_fixture(self, unit_state):
""" Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
name = unit_state.name
start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()
subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})
# Children of never published subsections will be added on demand via _ensure_unit_present
return XBlockFixtureDesc('chapter', name).add_children(
subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
else subsection.add_children(
XBlockFixtureDesc('vertical', name, metadata={
'visible_to_staff_only': True if unit_state.is_locked else None
})
)
)
def test_released_never_published_locked(self):
""" Tests that released never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_never_published_unlocked(self):
""" Tests that released never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_released_unpublished_changes_locked(self):
""" Tests that released unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_unpublished_changes_unlocked(self):
""" Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.LIVE_UNPUBLISHED_WARNING
)
def test_released_published_locked(self):
""" Tests that released published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_published_unlocked(self):
""" Tests that released published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def test_unreleased_never_published_locked(self):
""" Tests that unreleased never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_never_published_unlocked(self):
""" Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_unreleased_unpublished_changes_locked(self):
""" Tests that unreleased unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_unpublished_changes_unlocked(self):
"""
Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
release in the future'
"""
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.FUTURE_UNPUBLISHED_WARNING
)
def test_unreleased_published_locked(self):
""" Tests that unreleased published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_published_unlocked(self):
""" Tests that unreleased published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def _verify_unit_warning(self, unit_state, expected_status_message):
"""
Verifies that the given unit's messages match the expected messages.
If expected_status_message is None, then the unit status message is expected to not be present.
"""
self._ensure_unit_present(unit_state)
self.course_outline_page.visit()
section = self.course_outline_page.section(unit_state.name)
subsection = section.subsection_at(0)
subsection.expand_subsection()
unit = subsection.unit_at(0)
if expected_status_message == self.STAFF_ONLY_WARNING:
self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING)
else:
self.assertFalse(section.has_status_message)
self.assertFalse(subsection.has_status_message)
if expected_status_message:
self.assertEqual(unit.status_message, expected_status_message)
else:
self.assertFalse(unit.has_status_message)
def _ensure_unit_present(self, unit_state):
""" Ensures that a unit with the given state is present on the course outline """
if unit_state.publish_state == self.PublishState.PUBLISHED:
return
name = unit_state.name
self.course_outline_page.visit()
subsection = self.course_outline_page.section(name).subsection(name)
subsection.expand_subsection()
if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
unit = subsection.unit(name).go_to()
add_discussion(unit)
elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
subsection.add_unit()
unit = ContainerPage(self.browser, None)
unit.wait_for_page()
if unit.is_staff_locked != unit_state.is_locked:
unit.toggle_staff_lock()
@attr('shard_3')
class EditingSectionsTest(CourseOutlineTest):
"""
Feature: Editing Release date, Due date and grading type.
"""
__test__ = True
def test_can_edit_subsection(self):
"""
Scenario: I can edit the settings of a subsection.
Given that I have created a subsection
Then I see the release date, due date and grading policy of the subsection in the course outline
When I click on the configuration icon
Then the edit modal window is shown
And the release date, due date and grading policy fields are present
And they have correct initial values
Then I set new values for these fields
And I click the save button on the modal
Then I see the new release date, due date and grading policy of the subsection in the course outline
"""
self.course_outline_page.visit()
subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
# Verify that the release date is visible by default
self.assertTrue(subsection.release_date)
# Verify that the due date and policy are hidden by default
self.assertFalse(subsection.due_date)
self.assertFalse(subsection.policy)
modal = subsection.edit()
# Verify fields
self.assertTrue(modal.has_release_date())
self.assertTrue(modal.has_release_time())
self.assertTrue(modal.has_due_date())
self.assertTrue(modal.has_due_time())
self.assertTrue(modal.has_policy())
# Verify initial values
self.assertEqual(modal.release_date, u'1/1/1970')
self.assertEqual(modal.release_time, u'00:00')
self.assertEqual(modal.due_date, u'')
self.assertEqual(modal.due_time, u'')
self.assertEqual(modal.policy, u'Not Graded')
# Set new values
modal.release_date = '3/12/1972'
modal.release_time = '04:01'
modal.due_date = '7/21/2014'
modal.due_time = '23:39'
modal.policy = 'Lab'
modal.save()
self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
self.assertIn(u'04:01', subsection.release_date)
self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
self.assertIn(u'23:39', subsection.due_date)
self.assertIn(u'Lab', subsection.policy)
def test_can_edit_section(self):
"""
Scenario: I can edit the settings of a section.
Given that I have created a section
Then I see the release date of the section in the course outline
When I click on the configuration icon
Then the edit modal window is shown
And the release date field is present
And it has the correct initial value
Then I set a new value for this field
And I click the save button on the modal
Then I see the new release date of the section in the course outline
"""
self.course_outline_page.visit()
section = self.course_outline_page.section(SECTION_NAME)
# Verify that the release date is visible by default
self.assertTrue(section.release_date)
# Verify that Due date and Policy are not present
self.assertFalse(section.due_date)
self.assertFalse(section.policy)
modal = section.edit()
# Verify fields
self.assertTrue(modal.has_release_date())
self.assertFalse(modal.has_due_date())
self.assertFalse(modal.has_policy())
# Verify initial value
self.assertEqual(modal.release_date, u'1/1/1970')
# Set new value
modal.release_date = '5/14/1969'
modal.save()
self.assertIn(u'Released: May 14, 1969', section.release_date)
# Verify that Due date and Policy are not present
self.assertFalse(section.due_date)
self.assertFalse(section.policy)
def test_subsection_is_graded_in_lms(self):
"""
Scenario: I can set the grading type of a subsection from the course outline page.
Given I visit the progress page
And I see that the problem in the subsection has grading type "Practice"
Then I visit the course outline page
And I click on the configuration icon of the subsection
And I set the grading policy to "Lab"
And I click the save button on the modal
Then I visit the progress page
And I see that the problem in the subsection has grading type "Problem"
"""
progress_page = ProgressPage(self.browser, self.course_id)
progress_page.visit()
progress_page.wait_for_page()
self.assertEqual(u'Practice', progress_page.grading_formats[0])
self.course_outline_page.visit()
subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
modal = subsection.edit()
# Set new values
modal.policy = 'Lab'
modal.save()
progress_page.visit()
self.assertEqual(u'Problem', progress_page.grading_formats[0])
def test_unchanged_release_date_is_not_saved(self):
"""
Scenario: Saving a subsection without changing the release date will not override the release date
Given that I have created a section with a subsection
When I open the settings modal for the subsection
And I press save
And I open the settings modal for the section
And I change the release date to 07/20/1969
And I press save
Then the subsection and the section have the release date 07/20/1969
"""
self.course_outline_page.visit()
modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
modal.save()
modal = self.course_outline_page.section_at(0).edit()
modal.release_date = '7/20/1969'
modal.save()
release_text = 'Released: Jul 20, 1969'
self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr('shard_3')
class StaffLockTest(CourseOutlineTest):
"""
Feature: Sections, subsections, and units can be locked and unlocked from the course outline.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Create a course with one section, two subsections, and four units """
course_fixture.add_children(
XBlockFixtureDesc('chapter', '1').add_children(
XBlockFixtureDesc('sequential', '1.1').add_children(
XBlockFixtureDesc('vertical', '1.1.1'),
XBlockFixtureDesc('vertical', '1.1.2')
),
XBlockFixtureDesc('sequential', '1.2').add_children(
XBlockFixtureDesc('vertical', '1.2.1'),
XBlockFixtureDesc('vertical', '1.2.2')
)
)
)
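# Resulting outline (from the fixture above):
#   chapter '1'
#     sequential '1.1' -> verticals '1.1.1', '1.1.2'
#     sequential '1.2' -> verticals '1.2.1', '1.2.2'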
def _verify_descendants_are_staff_only(self, item):
"""Verifies that all the descendants of item are staff only"""
self.assertTrue(item.is_staff_only)
if hasattr(item, 'children'):
for child in item.children():
self._verify_descendants_are_staff_only(child)
def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning):
"""Removes staff lock from a course outline item and checks whether or not a warning appears."""
modal = outline_item.edit()
modal.is_explicitly_locked = False
if expect_warning:
self.assertTrue(modal.shows_staff_lock_warning())
else:
self.assertFalse(modal.shows_staff_lock_warning())
modal.save()
def _toggle_lock_on_unlocked_item(self, outline_item):
"""Toggles outline_item's staff lock on and then off, verifying the staff lock warning"""
self.assertFalse(outline_item.has_staff_lock_warning)
outline_item.set_staff_lock(True)
self.assertTrue(outline_item.has_staff_lock_warning)
self._verify_descendants_are_staff_only(outline_item)
outline_item.set_staff_lock(False)
self.assertFalse(outline_item.has_staff_lock_warning)
def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item):
"""Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock"""
child_item.set_staff_lock(True)
parent_item.set_staff_lock(True)
self.assertTrue(parent_item.has_staff_lock_warning)
self.assertTrue(child_item.has_staff_lock_warning)
parent_item.set_staff_lock(False)
self.assertFalse(parent_item.has_staff_lock_warning)
self.assertTrue(child_item.has_staff_lock_warning)
def test_units_can_be_locked(self):
"""
Scenario: Units can be locked and unlocked from the course outline page
Given I have a course with a unit
When I click on the configuration icon
And I enable explicit staff locking
And I click save
Then the unit shows a staff lock warning
And when I click on the configuration icon
And I disable explicit staff locking
And I click save
Then the unit does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
self._toggle_lock_on_unlocked_item(unit)
def test_subsections_can_be_locked(self):
"""
Scenario: Subsections can be locked and unlocked from the course outline page
Given I have a course with a subsection
When I click on the subsection's configuration icon
And I enable explicit staff locking
And I click save
Then the subsection shows a staff lock warning
And all its descendants are staff locked
And when I click on the subsection's configuration icon
And I disable explicit staff locking
And I click save
Then the subsection does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
self._toggle_lock_on_unlocked_item(subsection)
def test_sections_can_be_locked(self):
"""
Scenario: Sections can be locked and unlocked from the course outline page
Given I have a course with a section
When I click on the section's configuration icon
And I enable explicit staff locking
And I click save
Then the section shows a staff lock warning
And all its descendants are staff locked
And when I click on the section's configuration icon
And I disable explicit staff locking
And I click save
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
self._toggle_lock_on_unlocked_item(section)
def test_explicit_staff_lock_remains_after_unlocking_section(self):
"""
Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a section and one of its units
When I click on the section's configuration icon
And I disable explicit staff locking
And I click save
Then the unit still shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
unit = section.subsection_at(0).unit_at(0)
self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section)
def test_explicit_staff_lock_remains_after_unlocking_subsection(self):
"""
Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a subsection and one of its units
When I click on the subsection's configuration icon
And I disable explicit staff locking
And I click save
Then the unit still shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection)
def test_section_displays_lock_when_all_subsections_locked(self):
"""
Scenario: All subsections in a section are explicitly locked, so the section should display a staff-only warning
Given I have a course with one section and two subsections
When I enable explicit staff lock on all the subsections
Then the section shows a staff lock warning
"""
self.course_outline_page.visit()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).set_staff_lock(True)
section.subsection_at(1).set_staff_lock(True)
self.assertTrue(section.has_staff_lock_warning)
def test_section_displays_lock_when_all_units_locked(self):
"""
Scenario: All units in a section are explicitly locked, so the section should display a staff-only warning
Given I have a course with one section, two subsections, and four units
When I enable explicit staff lock on all the units
Then the section shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).unit_at(0).set_staff_lock(True)
section.subsection_at(0).unit_at(1).set_staff_lock(True)
section.subsection_at(1).unit_at(0).set_staff_lock(True)
section.subsection_at(1).unit_at(1).set_staff_lock(True)
self.assertTrue(section.has_staff_lock_warning)
def test_subsection_displays_lock_when_all_units_locked(self):
"""
Scenario: All units in a subsection are explicitly locked, so the subsection should display a staff-only warning
Given I have a course with one subsection and two units
When I enable explicit staff lock on all the units
Then the subsection shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.unit_at(0).set_staff_lock(True)
subsection.unit_at(1).set_staff_lock(True)
self.assertTrue(subsection.has_staff_lock_warning)
def test_section_does_not_display_lock_when_some_subsections_locked(self):
"""
Scenario: Only some subsections in a section are explicitly locked, so the section should NOT display a staff-only warning
Given I have a course with one section and two subsections
When I enable explicit staff lock on one subsection
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).set_staff_lock(True)
self.assertFalse(section.has_staff_lock_warning)
def test_section_does_not_display_lock_when_some_units_locked(self):
"""
Scenario: Only some units in a section are explicitly locked, so the section should NOT display a staff-only warning
Given I have a course with one section, two subsections, and four units
When I enable explicit staff lock on three units
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).unit_at(0).set_staff_lock(True)
section.subsection_at(0).unit_at(1).set_staff_lock(True)
section.subsection_at(1).unit_at(1).set_staff_lock(True)
self.assertFalse(section.has_staff_lock_warning)
def test_subsection_does_not_display_lock_when_some_units_locked(self):
"""
Scenario: Only some units in a subsection are explicitly locked, so the subsection should NOT display a staff-only warning
Given I have a course with one subsection and two units
When I enable explicit staff lock on one unit
Then the subsection does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.unit_at(0).set_staff_lock(True)
self.assertFalse(subsection.has_staff_lock_warning)
def test_locked_sections_do_not_appear_in_lms(self):
"""
Scenario: A locked section is not visible to students in the LMS
Given I have a course with two sections
When I enable explicit staff lock on one section
And I click the View Live button to switch to staff view
Then I see two sections in the sidebar
And when I switch the view mode to student view
Then I see one section in the sidebar
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.course_outline_page.section_at(1).set_staff_lock(True)
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_sections, 2)
StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
self.assertEqual(courseware.num_sections, 1)
def test_locked_subsections_do_not_appear_in_lms(self):
"""
Scenario: A locked subsection is not visible to students in the LMS
Given I have a course with two subsections
When I enable explicit staff lock on one subsection
And I click the View Live button to switch to staff view
Then I see two subsections in the sidebar
And when I switch the view mode to student view
Then I see one section in the sidebar
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(1).set_staff_lock(True)
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_subsections, 2)
StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
self.assertEqual(courseware.num_subsections, 1)
def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self):
"""
Scenario: Locking and unlocking a section will not publish its draft units
Given I have a course with a section and unit
And the unit has a draft and published version
When I enable explicit staff lock on the section
And I disable explicit staff lock on the section
And I click the View Live button to switch to staff view
Then I see the published version of the unit
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
add_discussion(unit)
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.set_staff_lock(True)
section.set_staff_lock(False)
unit = section.subsection_at(0).unit_at(0).go_to()
unit.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 0)
def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self):
"""
Scenario: Locking and unlocking a subsection will not publish its draft units
Given I have a course with a subsection and unit
And the unit has a draft and published version
When I enable explicit staff lock on the subsection
And I disable explicit staff lock on the subsection
And I click the View Live button to switch to staff view
Then I see the published version of the unit
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
add_discussion(unit)
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.set_staff_lock(True)
subsection.set_staff_lock(False)
unit = subsection.unit_at(0).go_to()
unit.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 0)
def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self):
"""
Scenario: Removing explicit staff lock from a unit which does not inherit staff lock displays a warning.
Given I have a course with a subsection and unit
When I enable explicit staff lock on the unit
And I disable explicit staff lock on the unit
Then I see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
unit.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(unit, True)
def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self):
"""
Scenario: Removing explicit staff lock from a subsection which does not inherit staff lock displays a warning.
Given I have a course with a section and subsection
When I enable explicit staff lock on the subsection
And I disable explicit staff lock on the subsection
Then I see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(subsection, True)
def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self):
"""
Scenario: Removing explicit staff lock from a unit which also inherits staff lock displays no warning.
Given I have a course with a subsection and unit
When I enable explicit staff lock on the subsection
And I enable explicit staff lock on the unit
When I disable explicit staff lock on the unit
Then I do not see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
subsection.set_staff_lock(True)
unit.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(unit, False)
def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self):
"""
Scenario: Removing explicit staff lock from a subsection which also inherits staff lock displays no warning.
Given I have a course with a section and subsection
When I enable explicit staff lock on the section
And I enable explicit staff lock on the subsection
When I disable explicit staff lock on the subsection
Then I do not see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
subsection = section.subsection_at(0)
section.set_staff_lock(True)
subsection.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(subsection, False)
@attr('shard_3')
class EditNamesTest(CourseOutlineTest):
"""
Feature: Click-to-edit section/subsection names
"""
__test__ = True
def set_name_and_verify(self, item, old_name, new_name, expected_name):
"""
Changes the display name of item from old_name to new_name, then verifies that its value is expected_name.
"""
self.assertEqual(item.name, old_name)
item.change_name(new_name)
self.assertFalse(item.in_editable_form())
self.assertEqual(item.name, expected_name)
def test_edit_section_name(self):
"""
Scenario: Click-to-edit section name
Given that I have created a section
When I click on the name of section
Then the section name becomes editable
And given that I have edited the section name
When I click outside of the edited section name
Then the section name saves
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0),
'Test Section',
'Changed',
'Changed'
)
def test_edit_subsection_name(self):
"""
Scenario: Click-to-edit subsection name
Given that I have created a subsection
When I click on the name of subsection
Then the subsection name becomes editable
And given that I have edited the subsection name
When I click outside of the edited subsection name
Then the subsection name saves
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0).subsection_at(0),
'Test Subsection',
'Changed',
'Changed'
)
def test_edit_empty_section_name(self):
"""
Scenario: Click-to-edit section name, enter empty name
Given that I have created a section
And I have clicked to edit the name of the section
And I have entered an empty section name
When I click outside of the edited section name
Then the section name does not change
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0),
'Test Section',
'',
'Test Section'
)
def test_edit_empty_subsection_name(self):
"""
Scenario: Click-to-edit subsection name, enter empty name
Given that I have created a subsection
And I have clicked to edit the name of the subsection
And I have entered an empty subsection name
When I click outside of the edited subsection name
Then the subsection name does not change
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0).subsection_at(0),
'Test Subsection',
'',
'Test Subsection'
)
def test_editing_names_does_not_expand_collapse(self):
"""
Scenario: A section stays in the same expand/collapse state while its name is edited
Given that I have created a section
And the section is collapsed
When I click on the name of the section
Then the section is collapsed
And given that I have entered a new name
Then the section is collapsed
And given that I press ENTER to finalize the name
Then the section is collapsed
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).expand_subsection()
self.assertFalse(self.course_outline_page.section_at(0).in_editable_form())
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).edit_name()
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).enter_name('Changed')
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).finalize_name()
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class CreateSectionsTest(CourseOutlineTest):
"""
Feature: Create new sections/subsections/units
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with a completely empty course to easily test adding things to it """
pass
def test_create_new_section_from_top_button(self):
"""
Scenario: Create new section from button at top of page
Given that I am on the course outline
When I click the "+ Add section" button at the top of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_section_from_bottom_button(self):
"""
Scenario: Create new section from button at bottom of page
Given that I am on the course outline
When I click the "+ Add section" button at the bottom of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_bottom_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_section_from_bottom_button_plus_icon(self):
"""
Scenario: Create new section from button plus icon at bottom of page
Given that I am on the course outline
When I click the plus icon in "+ Add section" button at the bottom of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_bottom_button(click_child_icon=True)
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_subsection(self):
"""
Scenario: Create new subsection
Given that I have created a section
When I click the "+ Add subsection" button in that section
Then I see a new subsection added to the bottom of the section
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).add_subsection()
subsections = self.course_outline_page.section_at(0).subsections()
self.assertEqual(len(subsections), 1)
self.assertTrue(subsections[0].in_editable_form())
def test_create_new_unit(self):
"""
Scenario: Create new unit
Given that I have created a section
And that I have created a subsection within that section
When I click the "+ Add unit" button in that subsection
Then I am redirected to a New Unit page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).add_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).add_unit()
unit_page = ContainerPage(self.browser, None)
EmptyPromise(unit_page.is_browser_on_page, 'Browser is on the unit page').fulfill()
self.assertTrue(unit_page.is_inline_editing_display_name())
@attr('shard_3')
class DeleteContentTest(CourseOutlineTest):
"""
Feature: Deleting sections/subsections/units
"""
__test__ = True
def test_delete_section(self):
"""
Scenario: Delete section
Given that I am on the course outline
When I click the delete button for a section on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the section
When I click "Yes, I want to delete this component"
Then the confirmation message should close
And the section should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
def test_cancel_delete_section(self):
"""
Scenario: Cancel delete of section
Given that I clicked the delete button for a section on the course outline
And I received a confirmation message, asking me if I really want to delete the component
When I click "Cancel"
Then the confirmation message should close
And the section should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.sections()), 1)
def test_delete_subsection(self):
"""
Scenario: Delete subsection
Given that I am on the course outline
When I click the delete button for a subsection on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the subsection
When I click "Yes, I want to delete this component"
Then the confirmation message should close
And the subsection should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0)
def test_cancel_delete_subsection(self):
"""
Scenario: Cancel delete of subsection
Given that I clicked the delete button for a subsection on the course outline
And I received a confirmation message, asking me if I really want to delete the subsection
When I click "cancel"
Then the confirmation message should close
And the subsection should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
def test_delete_unit(self):
"""
Scenario: Delete unit
Given that I am on the course outline
When I click the delete button for a unit on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the unit
When I click "Yes, I want to delete this unit"
Then the confirmation message should close
And the unit should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0)
def test_cancel_delete_unit(self):
"""
Scenario: Cancel delete of unit
Given that I clicked the delete button for a unit on the course outline
And I received a confirmation message, asking me if I really want to delete the unit
When I click "Cancel"
Then the confirmation message should close
And the unit should remain in the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
def test_delete_all_no_content_message(self):
"""
Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
Given that I delete all sections, subsections, and units in a course
When I visit the course outline
Then I will see a message that says, "You haven't added any content to this course yet"
And see a + Add Section button
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.has_no_content_message)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
self.assertTrue(self.course_outline_page.has_no_content_message)
@attr('shard_3')
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
"""
Feature: Courses with multiple sections can expand and collapse all sections.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with a course with two sections """
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('vertical', 'Test Unit 2')
)
)
)
def verify_all_sections(self, collapsed):
"""
Verifies that all sections are collapsed if collapsed is True, otherwise all expanded.
"""
for section in self.course_outline_page.sections():
self.assertEqual(collapsed, section.is_collapsed)
def toggle_all_sections(self):
"""
Toggles the expand collapse state of all sections.
"""
for section in self.course_outline_page.sections():
section.expand_subsection()
def test_expanded_by_default(self):
"""
Scenario: The default layout for the outline page is to show sections in expanded view
Given I have a course with sections
When I navigate to the course outline page
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Collapse link is removed after last section of a course is deleted
Given I have a course with multiple sections
And I navigate to the course outline page
When I will confirm all alerts
And I press the "section" delete icon
Then I do not see the "Collapse All Sections" link
And I will see a message that says "You haven't added any content to this course yet"
"""
self.course_outline_page.visit()
for section in self.course_outline_page.sections():
section.delete()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.assertTrue(self.course_outline_page.has_no_content_message)
def test_collapse_all_when_all_expanded(self):
"""
Scenario: Collapse all sections when all sections are expanded
Given I navigate to the outline page of a course with sections
And all sections are expanded
When I click the "Collapse All Sections" link
Then I see the "Expand All Sections" link
And all sections are collapsed
"""
self.course_outline_page.visit()
self.verify_all_sections(collapsed=False)
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.verify_all_sections(collapsed=True)
def test_collapse_all_when_some_expanded(self):
"""
Scenario: Collapsing all sections when 1 or more sections are already collapsed
Given I navigate to the outline page of a course with sections
And all sections are expanded
When I collapse the first section
And I click the "Collapse All Sections" link
Then I see the "Expand All Sections" link
And all sections are collapsed
"""
self.course_outline_page.visit()
self.verify_all_sections(collapsed=False)
self.course_outline_page.section_at(0).expand_subsection()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.verify_all_sections(collapsed=True)
def test_expand_all_when_all_collapsed(self):
"""
Scenario: Expanding all sections when all sections are collapsed
Given I navigate to the outline page of a course with multiple sections
And I click the "Collapse All Sections" link
When I click the "Expand All Sections" link
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
def test_expand_all_when_some_collapsed(self):
"""
Scenario: Expanding all sections when 1 or more sections are already expanded
Given I navigate to the outline page of a course with multiple sections
And I click the "Collapse All Sections" link
When I expand the first section
And I click the "Expand All Sections" link
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.course_outline_page.section_at(0).expand_subsection()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
@attr('shard_3')
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
"""
Feature: Courses with a single section can expand and collapse all sections.
"""
__test__ = True
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Collapse link is removed after last section of a course is deleted
Given I have a course with one section
And I navigate to the course outline page
When I will confirm all alerts
And I press the "section" delete icon
Then I do not see the "Collapse All Sections" link
And I will see a message that says "You haven't added any content to this course yet"
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).delete()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.assertTrue(self.course_outline_page.has_no_content_message)
def test_old_subsection_stays_collapsed_after_creation(self):
"""
Scenario: Collapsed subsection stays collapsed after creating a new subsection
Given I have a course with one section and subsection
And I navigate to the course outline page
Then the subsection is collapsed
And when I create a new subsection
Then the first subsection is collapsed
And the second subsection is expanded
"""
self.course_outline_page.visit()
self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
self.course_outline_page.section_at(0).add_subsection()
self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr('shard_3')
class ExpandCollapseEmptyTest(CourseOutlineTest):
"""
Feature: Courses with no sections initially can expand and collapse all sections after addition.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with an empty course """
pass
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Expand/collapse for a course with no sections
Given I have a course with no sections
When I navigate to the course outline page
Then I do not see the "Collapse All Sections" link
"""
self.course_outline_page.visit()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
def test_link_appears_after_section_creation(self):
"""
Scenario: Collapse link appears after creating first section of a course
Given I have a course with no sections
When I navigate to the course outline page
And I add a section
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.course_outline_page.add_section_from_top_button()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class DefaultStatesEmptyTest(CourseOutlineTest):
"""
Feature: Misc course outline default states/actions when starting with an empty course
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with an empty course """
pass
def test_empty_course_message(self):
"""
Scenario: Empty course state
Given that I am in a course with no sections, subsections, or units
When I visit the course outline
Then I will see a message that says "You haven't added any content to this course yet"
And see a + Add Section button
"""
self.course_outline_page.visit()
self.assertTrue(self.course_outline_page.has_no_content_message)
self.assertTrue(self.course_outline_page.bottom_add_section_button.is_present())
@attr('shard_3')
class DefaultStatesContentTest(CourseOutlineTest):
"""
Feature: Misc course outline default states/actions when starting with a course with content
"""
__test__ = True
def test_view_live(self):
"""
Scenario: View Live version from course outline
Given that I am on the course outline
When I click the "View Live" button
Then a new tab will open to the course on the LMS
"""
self.course_outline_page.visit()
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 3)
self.assertEqual(courseware.xblock_component_type(0), 'problem')
self.assertEqual(courseware.xblock_component_type(1), 'html')
self.assertEqual(courseware.xblock_component_type(2), 'discussion')
@attr('shard_3')
class UnitNavigationTest(CourseOutlineTest):
"""
Feature: Navigate to units
"""
__test__ = True
def test_navigate_to_unit(self):
"""
Scenario: Click unit name to navigate to unit page
Given that I have expanded a section/subsection so I can see unit names
When I click on a unit name
Then I will be taken to the appropriate unit page
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
self.assertTrue(unit.is_browser_on_page())
@attr('shard_3')
class PublishSectionTest(CourseOutlineTest):
"""
Feature: Publish sections.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with 2 subsections inside a single section.
The first subsection has 2 units, and the second subsection has one unit.
"""
self.courseware = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
course_fixture.add_children(
XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', UNIT_NAME),
XBlockFixtureDesc('vertical', 'Test Unit 2'),
),
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('vertical', 'Test Unit 3'),
),
),
)
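# Resulting structure (from the fixture above): SECTION_NAME contains
# SUBSECTION_NAME (with UNIT_NAME and 'Test Unit 2') and 'Test Subsection 2'
# (with 'Test Unit 3').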
def test_unit_publishing(self):
"""
Scenario: Can publish a unit and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
Then I see publish buttons for the first unit, subsection, and section
When I publish the first unit
Then I see that the publish button for the first unit disappears
And I see publish buttons for the subsection and section
And I see the changed content in the LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
unit.publish()
self.assertFalse(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
def test_subsection_publishing(self):
"""
Scenario: Can publish a subsection and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
Then I see publish buttons for the unit, subsection, and section
When I publish the first subsection
Then I see that the publish button for the first subsection disappears
And I see that the publish buttons disappear for the child units of the subsection
And I see a publish button for the section
And I see the changed content in the LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish()
self.assertFalse(unit.publish_action)
self.assertFalse(subsection.publish_action)
self.assertTrue(section.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_sequential_position(2)
self.assertEqual(1, self.courseware.num_xblock_components)
def test_section_publishing(self):
"""
Scenario: Can publish a section and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
Then I see publish buttons for the unit, subsection, and section
When I publish the section
Then I see that all publish buttons disappear
And I see the changed content in the LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.assertTrue(unit.publish_action)
self.course_outline_page.section(SECTION_NAME).publish()
self.assertFalse(subsection.publish_action)
self.assertFalse(section.publish_action)
self.assertFalse(unit.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_sequential_position(2)
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_section(SECTION_NAME, 'Test Subsection 2')
self.assertEqual(1, self.courseware.num_xblock_components)
def _add_unpublished_content(self):
"""
Adds unpublished HTML content to first three units in the course.
"""
for index in xrange(3):
self.course_fixture.create_xblock(
self.course_fixture.get_nested_xblocks(category="vertical")[index].locator,
XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)),
)
def _get_items(self):
"""
Returns first section, subsection, and unit on the page.
"""
section = self.course_outline_page.section(SECTION_NAME)
subsection = section.subsection(SUBSECTION_NAME)
unit = subsection.expand_subsection().unit(UNIT_NAME)
return (section, subsection, unit)
@attr('shard_3')
class DeprecationWarningMessageTest(CourseOutlineTest):
"""
Feature: Verify deprecation warning message.
"""
HEADING_TEXT = 'This course uses features that are no longer supported.'
COMPONENT_LIST_HEADING = 'You must delete or replace the following components.'
ADVANCE_MODULES_REMOVE_TEXT = ('To avoid errors, edX strongly recommends that you remove unsupported features '
'from the course advanced settings. To do this, go to the Advanced Settings '
'page, locate the "Advanced Module List" setting, and then delete the following '
'modules from the list.')
DEFAULT_DISPLAYNAME = "Deprecated Component"
def _add_deprecated_advance_modules(self, block_types):
"""
Add `block_types` into `Advanced Module List`
Arguments:
block_types (list): list of block types
"""
self.advanced_settings.visit()
self.advanced_settings.set_values({"Advanced Module List": json.dumps(block_types)})
def _create_deprecated_components(self):
"""
Create deprecated components.
"""
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc('combinedopenended', "Open", data=load_data_str('ora_peer_problem.xml'))
)
self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('peergrading', 'Peer'))
def _verify_deprecation_warning_info(
self,
deprecated_blocks_present,
components_present,
components_display_name_list=None,
deprecated_modules_list=None
):
"""
Verify the deprecation warning.
Arguments:
deprecated_blocks_present (bool): whether the advanced-modules removal
text and the list of deprecated modules are visible
components_present (bool): whether the components list is shown
components_display_name_list (list): list of component display names
deprecated_modules_list (list): list of deprecated advanced modules
"""
self.assertTrue(self.course_outline_page.deprecated_warning_visible)
self.assertEqual(self.course_outline_page.warning_heading_text, self.HEADING_TEXT)
self.assertEqual(self.course_outline_page.modules_remove_text_shown, deprecated_blocks_present)
if deprecated_blocks_present:
self.assertEqual(self.course_outline_page.modules_remove_text, self.ADVANCE_MODULES_REMOVE_TEXT)
self.assertEqual(self.course_outline_page.deprecated_advance_modules, deprecated_modules_list)
self.assertEqual(self.course_outline_page.components_visible, components_present)
if components_present:
self.assertEqual(self.course_outline_page.components_list_heading, self.COMPONENT_LIST_HEADING)
self.assertItemsEqual(self.course_outline_page.components_display_names, components_display_name_list)
def test_no_deprecation_warning_message_present(self):
"""
Scenario: Verify that the deprecation warning message is not shown if no ORA1
advanced modules are present and no ORA1 components exist in the
course outline.
When I go to the course outline
Then I don't see the ORA1 deprecation warning
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.deprecated_warning_visible)
def test_deprecation_warning_message_present(self):
"""
Scenario: Verify the deprecation warning message if ORA1 advanced modules
and ORA1 components are present.
Given I have ORA1 advanced modules present in `Advanced Module List`
And I have created 2 ORA1 components
When I go to the course outline
Then I see the ORA1 deprecation warning
And I see the correct ORA1 deprecation warning heading text
And I see the correct ORA1 deprecation warning advanced-modules removal text
And I see the list of ORA1 components with the correct display names
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=True,
components_display_name_list=['Open', 'Peer'],
deprecated_modules_list=['peergrading', 'combinedopenended']
)
def test_deprecation_warning_with_no_displayname(self):
"""
Scenario: Verify the deprecation warning message if ORA1 components are present.
Given I have created 1 deprecated ORA1 component
When I go to the course outline
Then I see the ORA1 deprecation warning
And I see the correct ORA1 deprecation warning heading text
And I see the list of ORA1 components with the correct message
"""
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
# Create a deprecated ORA1 component with an empty display_name and make sure
# the deprecation warning is displayed with the default display name.
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc(category='combinedopenended', display_name="", data=load_data_str('ora_peer_problem.xml'))
)
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=False,
components_present=True,
components_display_name_list=[self.DEFAULT_DISPLAYNAME],
)
def test_warning_with_ora1_advance_modules_only(self):
"""
Scenario: Verify that the deprecation warning message is shown if only
ORA1 advanced modules are present and no ORA1 components exist.
Given I have ORA1 advanced modules present in `Advanced Module List`
When I go to the course outline
Then I see the ORA1 deprecation warning
And I see the correct ORA1 deprecation warning heading text
And I see the correct ORA1 deprecation warning advanced-modules removal text
And I don't see the list of ORA1 components
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=False,
deprecated_modules_list=['peergrading', 'combinedopenended']
)
def test_warning_with_ora1_components_only(self):
"""
Scenario: Verify that the deprecation warning message is shown if only
ORA1 components exist and no ORA1 advanced modules are present.
Given I have created two ORA1 components
When I go to the course outline
Then I see the ORA1 deprecation warning
And I see the correct ORA1 deprecation warning heading text
And I don't see the ORA1 deprecation warning advanced-modules removal text
And I see the list of ORA1 components with the correct display names
"""
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=False,
components_present=True,
components_display_name_list=['Open', 'Peer']
)
@attr('shard_4')
class SelfPacedOutlineTest(CourseOutlineTest):
"""Test the course outline for a self-paced course."""
def populate_course_fixture(self, course_fixture):
course_fixture.add_children(
XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', UNIT_NAME)
)
),
)
self.course_fixture.add_course_details({
'self_paced': True,
'start_date': datetime.now() + timedelta(days=1)
})
ConfigModelFixture('/config/self_paced', {'enabled': True}).install()
def test_release_dates_not_shown(self):
"""
Scenario: Ensure that block release dates are not shown on the
course outline page of a self-paced course.
Given I am the author of a self-paced course
When I go to the course outline
Then I should not see release dates for course content
"""
self.course_outline_page.visit()
section = self.course_outline_page.section(SECTION_NAME)
self.assertEqual(section.release_date, '')
subsection = section.subsection(SUBSECTION_NAME)
self.assertEqual(subsection.release_date, '')
def test_edit_section_and_subsection(self):
"""
Scenario: Ensure that block release/due dates are not shown
in their settings modals.
Given I am the author of a self-paced course
When I go to the course outline
And I click on settings for a section or subsection
Then I should not see release or due date settings
"""
self.course_outline_page.visit()
section = self.course_outline_page.section(SECTION_NAME)
modal = section.edit()
self.assertFalse(modal.has_release_date())
self.assertFalse(modal.has_due_date())
modal.cancel()
subsection = section.subsection(SUBSECTION_NAME)
modal = subsection.edit()
self.assertFalse(modal.has_release_date())
self.assertFalse(modal.has_due_date())
| simbs/edx-platform | common/test/acceptance/tests/studio/test_studio_outline.py | Python | agpl-3.0 | 81,777 | ["VisIt"] | 46f1db0da32edcd0f120df29c91840299f8e5a5e3846ac5b44fde205b38efeae |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# fractaliser.py
#
# A script that displays a fractal based on a tuple.
#
# This program is free software licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 France License.
# To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/fr/
# or send a letter to Creative Commons,
# 444 Castro Street, Suite 900, Mountain View, California, 94041, USA.
from random import randint
from time import sleep, time
import turtle as t
import math as m
import getopt
import sys
__version__ = "0.1"
__author__ = "Jérémie Balagna-Ranin <jeremie.balagna@gmail.com>"
class Fractal(object):
'''Main fractal class'''
def __init__(self, intuple, modtuple):
'''Initialisation of fractal object'''
self.ADD = 0
self.SUB = 1
self.MUL = 2
self.DIV = 3
self.POW = 4
self.MOD = 5
self.data = []
for value in intuple:
self.data.append(float(value))
self.data = tuple(self.data)
self.manips = list(self.data)
self.mod = []
for mod in modtuple:
if mod[0] == '+':
self.mod.append((self.ADD, float(mod[1:])))
elif mod[0] == '-':
self.mod.append((self.SUB, float(mod[1:])))
elif mod[0] == '*':
self.mod.append((self.MUL, float(mod[1:])))
elif mod[0] == '/':
self.mod.append((self.DIV, float(mod[1:])))
elif mod[0] == '%':
self.mod.append((self.MOD, float(mod[1:])))
elif mod[0] == '^':
# '^' selects the power operation (self.POW)
self.mod.append((self.POW, float(mod[1:])))
else:
self.mod.append((self.ADD, float(mod)))
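# Modifier encoding: '+n' add, '-n' subtract, '*n' multiply, '/n' divide,
# '%n' modulo, '^n' power; a bare number defaults to addition.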
def calculate(self):
'''Apply each modifier operation to the corresponding component'''
for i in range(0, len(self.mod)):
if self.mod[i][0] == self.ADD:
self.manips[i] += self.mod[i][1]
elif self.mod[i][0] == self.SUB:
self.manips[i] -= self.mod[i][1]
elif self.mod[i][0] == self.MUL:
self.manips[i] *= self.mod[i][1]
elif self.mod[i][0] == self.DIV:
self.manips[i] /= self.mod[i][1]
elif self.mod[i][0] == self.POW:
self.manips[i] **= self.mod[i][1]
elif self.mod[i][0] == self.MOD:
self.manips[i] %= self.mod[i][1]
print(self.manips)
def get(self):
'''Simply return the working tuple'''
return self.manips
def dtan(fr, to):
    '''Directly return the angle [degrees] and the distance to trace between two points'''
    dx = to[0] - fr[0]
    dy = to[1] - fr[1]
    hyp = int(m.sqrt(dx**2 + dy**2))
    ang = int(m.degrees(m.atan2(dy, dx)))  # atan2, not tan: handles dx == 0 safely
    return (ang, hyp)
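# Illustrative sketch (not part of the original script): with the atan2 fix
# above, dtan((0, 0), (3, 4)) returns (53, 5), i.e. an angle of
# int(degrees(atan2(4, 3))) = 53 and a hypotenuse of int(sqrt(3**2 + 4**2)) = 5.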
def usage():
    print('''Usage: ./fractaliser.py [options]''')
    print('''    -h: print this help and exit''')
    print('''    --iterations=<int>: set how many times the fractal will iterate''')
    print('''                        through the recursive function''')
    print('''    --intuple=<tuple>: set the input tuple, formed as x1,y1,x2,y2,...''')
    print('''                       Here, the tuple must have the following form:''')
    print('''                           x1,y1,x2,y2,r,g,b''')
    print('''                       where x1,y1: coords of the first point''')
    print('''                       where x2,y2: coords of the second point''')
    print('''                       where r,g,b: components of the color''')
    print('''    --modtuple=<tuple>: set the modifier tuple, formed as''')
    print('''                        <symbol>x1,<symbol>y1,<symbol>x2,...''')
    print('''                        i.e. the same form as above:''')
    print('''                            x1,y1,x2,y2,r,g,b''')
    print('''                        with a symbol ahead of each value''')
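# Example invocation (illustrative values only, following the tuple format
# documented in usage() above):
#   ./fractaliser.py --iterations=100 --intuple=0,0,42,42,10,20,30 \
#                    --modtuple=+1,*1.1,+2,*1.05,+3,+5,+7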
def draw(img):
ang, hyp = dtan((img[0], img[1]), (img[2], img[3]))
t.color(img[4] % 255, img[5] % 255, img[6] % 255)
t.left(ang)
t.forward(hyp)
def run(intuple, modtuple, ite=42):
# init turtle screen
t.title("Simple fractal try")
t.hideturtle()
t.colormode(255)
t.color(255, 255, 255)
# Start display
print('''Start render''')
stime = int(time())
fractal = Fractal(intuple, modtuple)
draw(fractal.get())
for i in range(1, ite):
fractal.calculate()
draw(fractal.get())
sleep(0.1)
if i % 10 == 0:
print('''-------------------- ''' + str(i) + ''' iterations --------------------''')
sleep(0.1)
if i % 100 == 0:
sleep(0.5)
etime = int(time())
diff_t = etime - stime
diff_m = int(diff_t / 60)
diff_s = int(diff_t % 60)
print('''Finish render in ''', diff_m, ''' min and ''', diff_s, ''' sec.''')
t.exitonclick()
if __name__ == '__main__':
# Args management
try:
opts, args = getopt.getopt(sys.argv[1:], "h",
["iterations=", "intuple=", "modtuple=", "help"])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(1)
ite = 42
intuple = ('0', '0', '42', '42', '0', '0', '0')
modtuple = ('+42', '+42', '+42', '+42', '+42', '+42', '+42')
for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit()
if o == '--iterations':
ite = int(a)
continue
        if o == '--intuple':
            intuple = tuple(a.split(','))
            if len(intuple) != 7:
                print('''intuple must contain 7 values separated by ',' ''')
                sys.exit(1)
            continue
        if o == '--modtuple':
            modtuple = tuple(a.split(','))
            if len(modtuple) != 7:
                print('''modtuple must contain 7 values separated by ',' ''')
                sys.exit(1)
            continue
run(intuple, modtuple, ite)
| Meier-Link/fractaliser | fractaliser.py | Python | unlicense | 5,370 | ["VisIt"] | 2a12a5e2b7421cf869c72a7ba324c86b9472ba5c28867c6190079f728ec1c8c2 |
# -*- coding: utf-8 -*-
def do_hello():
print(u"ciao RASPA !")
tot = 1 + 1
print(tot)
def fib_from_string(x):
s = "0112358"
if x < 0 or x > 6:
raise ValueError("Valore non ammesso {}".format(x))
else:
return int(s[x])
def fib_from_list(x):
l = [0,1,1,2,3,5,8]
if x < 0 or x > 6:
raise ValueError("Valore non ammesso {}".format(x))
else:
return l[x]
def fib_from_algo(x):
if x < 0:
raise ValueError("Valore non ammesso {}".format(x))
elif x == 0 or x == 1:
return x
else:
return fib_from_algo(x - 1) + fib_from_algo(x - 2)
fib_dic = {0: 0, 1: 1}  # Base cases: fib(0) = 0, matching the implementations above
def fib_from_dict(x):
"""
    Compute Fibonacci with a dictionary cache
"""
if x in fib_dic:
return fib_dic[x]
fib_pre = fib_from_dict(x-1)
fib_dic[x-1] = fib_pre
fib_prepre = fib_from_dict(x-2)
fib_dic[x-2] = fib_prepre
return fib_pre + fib_prepre
fib = fib_from_dict
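# Illustrative check (assuming the corrected base cases above): the four
# implementations agree on positions 0..6, e.g.
#   [fib_from_string(5), fib_from_list(5), fib_from_algo(5), fib_from_dict(5)]
# evaluates to [5, 5, 5, 5].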
if __name__ == "__main__":
pos_fib = raw_input("Posizione di fibonacci")
max_fib = int(pos_fib)
print(fib(max_fib))
| feroda/lessons-python4beginners | students/2016-09-04/raspa77/fibonacci.py | Python | agpl-3.0 | 1,129 | ["RASPA"] | 27eb4b893cf99e150a881699d0e434a0ec2c96aae416c4bb292b64fa68814a7c |
# -*- coding: utf-8 -*-
##############################################################################
# Filename: libExtractStar.py
# Version: $Revision$
# Description: Extract_star utilities module
# Modified at: $Date$
# Author: $Author$
# $Id$
##############################################################################
from __future__ import print_function
import re
import itertools # product
import numpy as N
import scipy.linalg as SL # More complete than numpy.linalg
import scipy.optimize as SO
from .pySNIFS import SNIFS_cube
from .extern import Atmosphere as TA
"""Extract_star classes and functions."""
__author__ = "Y. Copin, C. Buton, E. Pecontal"
__version__ = '$Id$'
LbdaRef = 5000. # Use constant ref. wavelength for easy comparison
SpxSize = SNIFS_cube.spxSize # Spaxel size in arcsec
STEPJ = (7, 7) # Definition of step limit (i, j)
MIN_ELLIPTICITY = 0.2
MAX_ELLIPTICITY = 5.
MIN_ALPHA = 0.1
MAX_ALPHA = 15.
MAX_POSITION = 9.
def print_msg(astr, limit, verb=0):
"""
Print message 'str' if verbosity level (typically opts.verbosity)
>= limit.
"""
if verb >= limit:
print(astr)
# PSF fitting ==============================
def fit_metaslices(cube, psf_fn, skyDeg=0, nsky=2, chi2fit=True,
scalePriors=0., seeingPrior=None, posPrior=None,
airmass=1., verbosity=1):
"""
Adjust PSF parameters on each (meta)slices of (meta)*cube* using
PSF *psf_fn* and a background of polynomial degree *skyDeg*. Add
priors on seeing and position if any.
"""
from scipy.ndimage.filters import median_filter
import pySNIFS_fit
assert skyDeg >= -2, \
"skyDeg=%d is invalid (should be >=-2)" % skyDeg
npar_psf = 7 # Number of parameters of the psf
if skyDeg == -2: # Nb of parameters in step background (per slice)
npar_sky = 2 # = StepJ.npar_ind
else: # Nb of params in polynomial background
npar_sky = (skyDeg + 1) * (skyDeg + 2) / 2
cube_sky = SNIFS_cube() # 1-slice cube for background fit
cube_sky.x = cube.x
cube_sky.y = cube.y
cube_sky.i = cube.i
cube_sky.j = cube.j
cube_sky.nslice = 1
cube_sky.nlens = cube.nlens
cube_star = SNIFS_cube() # 1-slice cube for point-source fit
cube_star.x = cube.x
cube_star.y = cube.y
cube_star.i = cube.i
cube_star.j = cube.j
cube_star.nslice = 1
cube_star.nlens = cube.nlens
# PSF intensity + Sky + Bkgnd coeffs
params = N.zeros((cube.nslice, npar_psf + 1 + npar_sky), dtype='d')
dparams = N.zeros((cube.nslice, npar_psf + 1 + npar_sky), dtype='d')
chi2s = N.zeros(cube.nslice, dtype='d')
# Nb of edge spx used for sky estimate
if nsky > 7:
        raise ValueError('The number of edge spaxels should be at most 7')
skySpx = ( (cube_sky.i < nsky) | (cube_sky.i >= 15 - nsky) |
(cube_sky.j < nsky) | (cube_sky.j >= 15 - nsky) )
print_msg(" Set initial guess from 2D-Gaussian + Cte fit", 2, verbosity)
parnames = ['delta=0', 'theta=0', 'xc', 'yc', 'xy', 'y2', 'alpha', 'I'] + \
['B%02d' % j for j in range(npar_sky)]
print_msg(" Adjusted parameters: %s" % ','.join(parnames), 2, verbosity)
xc, yc = None, None # Position guess at lmid
alpha = None # Alpha guess
channel = 'B' if cube.lstart < 5000. else 'R'
if channel == 'B': # Blue cube
loop = range(cube.nslice)[::-1] # Blueward loop
else: # Red cube
loop = range(cube.nslice) # Redward loop
# Hyper-term on alpha from seeing prior and on position
if scalePriors:
if not psf_fn.model.endswith('powerlaw'):
raise NotImplementedError
if seeingPrior:
print(" Seeing prior: %.2f\"" % seeingPrior)
if posPrior is not None:
print(" Position prior: %+.2f x %+.2f spx" % tuple(posPrior))
for i in loop: # Loop over cube slices
# Fill-in the meta-slice
print_msg((" Meta-slice #%d/%d, %.0f A " %
(i + 1, cube.nslice, cube.lbda[i])).center(50, '-'),
2, verbosity)
# Single-slice cubes
cube_star.lbda = N.array([cube.lbda[i]])
cube_star.data = cube.data[i, N.newaxis]
cube_star.var = cube.var[i, N.newaxis]
cube_sky.data = cube.data[i, N.newaxis].copy() # Will be modified
cube_sky.var = cube.var[i, N.newaxis].copy()
# Sky median estimate from FoV edge spx
skyLev = N.median(cube_sky.data[:, skySpx], axis=None)
if skyDeg > 0:
# Fit a 2D polynomial of degree skyDeg on the edge pixels
# of a given cube slice.
cube_sky.var[:, ~skySpx] = 0 # Do not adjust central spaxels
if not chi2fit:
cube_sky.var[cube_sky.var > 0] = 1 # Least-square
model_sky = pySNIFS_fit.model(
data=cube_sky, func=['poly2D;%d' % skyDeg],
param=[[skyLev] + [0.] * (npar_sky - 1)],
bounds=[[[0, None]] +
[[None, None]] * (npar_sky - 1)])
model_sky.fit()
skyLev = model_sky.evalfit().squeeze() # Structured bkgnd estimate
# Rough guess parameters for the current slice
medstar = median_filter(cube_star.data[0], 3) - skyLev # (nspx,)
imax = medstar.max() # Intensity
if posPrior is not None: # Use prior on position
# Note the prior on position (formally at ADR reference wavelength
# lmid) is not differentially refracted to current wavelength
xc, yc = posPrior
elif (xc, yc) == (None, None) or (xc, yc) == (0, 0):
# No prior nor previous estimate: use flux-weighted centroid on
# central part
xc = N.average(cube.x[~skySpx], weights=medstar[~skySpx])
yc = N.average(cube.y[~skySpx], weights=medstar[~skySpx])
if not (-7 + nsky < xc < 7 - nsky and -7 + nsky < yc < 7 - nsky):
xc, yc = 0., 0.
cube_sky.data -= skyLev # Subtract background level
if chi2fit:
cube_sky.var = cube.var[i, N.newaxis] # Reset to cube.var for chi2
else:
cube_sky.var = None # Least-square
# Guess parameters from 2D-Gaussian + polynomial background fit
model_gauss = pySNIFS_fit.model(data=cube_sky,
func=['gaus2D', 'poly2D;0'],
param=[[xc, yc, 1, 1, imax], [0]],
bounds=[[[-7, +7]] * 2 + # xc,yc
[[0.4, 4]] * 2 + # sx,sy
# intensity
[[0, 5 * imax]],
[[None, None]]]) # background
model_gauss.minimize(verbose=(verbosity >= 3), tol=1e-4)
print_msg(model_gauss.facts(params=verbosity >= 3,
names=['xc', 'yc', 'sx', 'sy', 'I', 'B']),
2, verbosity)
if model_gauss.success:
xc, yc = model_gauss.fitpar[:2] # Update centroid position
alpha = max(N.hypot(*model_gauss.fitpar[2:4]), 1.)
else:
print("WARNING: gaussian fit failed (status=%d: %s) " % \
(model_gauss.status, model_gauss.res.message))
if alpha is None:
alpha = 2.4 # Educated guess from median seeing
# Hyper-term on alpha from seeing and position prior
hyper = {}
if scalePriors:
hterm = Hyper_PSF2D_PL(cube.lbda[i], seeingPrior, airmass, channel,
position=posPrior, scale=scalePriors)
print_msg(str(hterm), 2, verbosity)
hyper = {psf_fn.name: hterm}
if seeingPrior:
alpha = hterm.predict_alpha(cube.lbda[i])
if posPrior is not None:
xc, yc = posPrior
# Filling in the guess parameter arrays (px) and bounds arrays (bx)
p1 = [0., 0., xc, yc, 0., 1., alpha, imax] # psf parameters
b1 = [[0, 0], # delta (unfitted)
[0, 0], # theta (unfitted)
[-10, 10], # xc
[-10, 10], # yc
[-0.6, +0.6], # xy parameter
[0.2, 5], # Ellipticity parameter > 0
[0.1, 15], # alpha > 0
[0, None]] # Intensity > 0
# alphaDeg & ellDeg set to 0 for meta-slice fits
func = ['%s;%f,%f,%f,%f' %
(psf_fn.name, cube_star.spxSize, cube_star.lbda[0], 0, 0)]
param = [p1]
bounds = [b1]
myfunc = {psf_fn.name: psf_fn} # Pointer to PSF function
# Background initial guess
if skyDeg >= 0:
# Use estimate from prev. polynomial fit
if skyDeg:
p2 = list(model_sky.fitpar)
else: # Guess: Background=constant (>0)
p2 = [skyLev]
b2 = [[0, None]] + [[None, None]] * (npar_sky - 1)
func += ['poly2D;%d' % skyDeg]
param += [p2]
bounds += [b2]
elif skyDeg == -2: # Step background
p2 = [skyLev, 0] # Mean and difference
b2 = [[0, None], [None, None]]
func += ['stepJ;%d,%d' % STEPJ]
myfunc.update(((StepJ.name, StepJ),)) # Pointer to StepJ function
param += [p2]
bounds += [b2]
else: # No background
p2 = []
# print_msg(" Initial guess: %s" % (p1+p2), 2, verbosity)
# Chi2 vs. Least-square fit
if not chi2fit:
cube_star.var = None # Will be handled by pySNIFS_fit.model
# Instantiate the model class and fit current slice
model_star = pySNIFS_fit.model(data=cube_star, func=func,
param=param, bounds=bounds,
myfunc=myfunc,
hyper=hyper)
if verbosity >= 4:
print("Gradient checks:") # Includes hyper-term if any
model_star.check_grad()
model_star.minimize(verbose=(verbosity >= 2), tol=1e-6,
options={'maxiter': 400})
print_msg(model_star.facts(params=(verbosity >= 2), names=parnames),
1, verbosity)
if scalePriors:
print_msg(" Hyper-term: h=%f" % hterm.comp(model_star.fitpar),
2, verbosity)
# Restore true chi2 (not reduced one), ie. chi2 =
# ((cube_star.data-model_star.evalfit())**2/cube_star.var).sum()
# For least-square fitting, this actually corresponds to
# RSS=residual sum of squares
model_star.khi2 *= model_star.dof
# Check fit results
if not model_star.success: # Fit failure
pass
elif not MIN_ELLIPTICITY < model_star.fitpar[5] < MAX_ELLIPTICITY:
model_star.success = False
model_star.status = -1
model_star.res.message = "ellipticity is invalid (%.2f)" % \
model_star.fitpar[5]
elif not MIN_ALPHA < model_star.fitpar[6] < MAX_ALPHA:
model_star.success = False
model_star.status = -2
model_star.res.message = "alpha is invalid (%.2f)" % \
model_star.fitpar[6]
elif not model_star.fitpar[7] > 0:
model_star.success = False
model_star.status = -3
model_star.res.message = "intensity is null"
elif not (abs(model_star.fitpar[2]) < MAX_POSITION and
abs(model_star.fitpar[3]) < MAX_POSITION):
model_star.success = False
model_star.status = -3
model_star.res.message = "source is outside FoV (%.2f,%.2f)" % \
(model_star.fitpar[2],
model_star.fitpar[3])
# Error computation and metaslice clipping
if model_star.success:
cov = model_star.param_cov()
diag = cov.diagonal()
if (diag >= 0).all():
dpar = N.sqrt(diag)
else: # Some negative diagonal elements!
model_star.success = False
model_star.status = -4
model_star.res.message = "negative covariance diagonal elements"
if not model_star.success: # Set error to 0 if status
print("WARNING: metaslice #%d, status=%d: %s" % \
(i + 1, model_star.status, model_star.res.message,))
model_star.khi2 *= -1 # To be discarded
dpar = N.zeros(len(dparams.T))
else:
xc, yc = model_star.fitpar[2:4] # Update centroid position
# Storing the result of the current slice parameters
params[i] = model_star.fitpar
dparams[i] = dpar
chi2s[i] = model_star.khi2
return params, chi2s, dparams
# Point-source extraction ==============================
def extract_specs(cube, psf, skyDeg=0,
method='psf', radius=5., chi2fit=True, verbosity=0):
"""
Extract object and sky spectra from *cube* using PSF -- described
by *psf*=(psf_fn,psf_ctes,psf_param) -- in presence of sky
(polynomial degree *skyDeg*) using *method* ('psf':
PSF-photometry, 'aperture': aperture photometry, or
'optimal'). For aperture related methods, *radius* gives aperture
radius in arcsec.
Returns (lbda, sigspecs, varspecs) where sigspecs and varspecs are
(nslice, npar+1).
"""
assert method in ('psf', 'aperture', 'subaperture', 'optimal'), \
"Unknown extraction method '%s'" % method
assert skyDeg >= -2, \
"skyDeg=%d is invalid (should be >=-2)" % skyDeg
if (N.isnan(cube.var).any()):
print("WARNING: discarding NaN variances in extract_specs")
cube.var[N.isnan(cube.var)] = 0
if (cube.var > 1e20).any():
print("WARNING: discarding infinite variances in extract_specs")
cube.var[cube.var > 1e20] = 0
if (cube.var < 0).any(): # There should be none anymore
print("WARNING: discarding negative variances in extract_specs")
cube.var[cube.var < 0] = 0
psf_fn, psf_ctes, psf_param = psf # Unpack PSF description
# The PSF parameters are only the shape parameters. We arbitrarily
# set the intensity of each slice to 1.
param = N.concatenate((psf_param, N.ones(cube.nslice)))
# General linear least-squares fit: data = I*PSF + sky [ + a*x + b*y + ...]
# See Numerical Recipes (2nd ed.), sect.15.4
spxSize = psf_ctes[0] # Spaxel size [arcsec]
cube.x = cube.i - 7 # x in spaxel
cube.y = cube.j - 7 # y in spaxel
model = psf_fn(psf_ctes, cube)
psf = model.comp(param, normed=True) # (nslice, nlens)
if skyDeg == -2: # Step background
npar_sky = 2
else: # Polynomial background (or none)
npar_sky = (skyDeg + 1) * (skyDeg + 2) / 2
# Basis function matrix: BF (nslice, nlens, npar + 1) (so-called X in NR)
BF = N.zeros((cube.nslice, cube.nlens, npar_sky + 1), 'd')
BF[:, :, 0] = psf # Intensity
if skyDeg >= 0: # Build up polynomial background
BF[:, :, 1] = 1 # Constant term
n = 2
for d in xrange(1, skyDeg + 1):
for j in xrange(d + 1):
# Background polynomials as function of spaxel (centered)
# position [spx]
BF[:, :, n] = cube.x**(d - j) * cube.y**j
n += 1 # Finally: n = npar_sky + 1
elif skyDeg == -2: # Step background
bkgnd = StepJ(STEPJ, cube)
BF[:, :, 1:] = 1 # Mean background spectrum
BF[:, ~bkgnd.lower_part, 2] = -1 # Differential background spectrum
# Chi2 (variance-weighted) vs. Least-square (unweighted) fit
# *Note* that weight is actually 1/sqrt(var) (see NR)
if chi2fit:
weight = N.where(cube.var > 0, cube.var**(-0.5), 0) # (nslice, nlens)
else:
weight = N.where(cube.var > 0, 1, 0) # (nslice, nlens)
# Design matrix (basis functions normalized by std errors)
A = BF * weight[..., N.newaxis] # (nslice, nlens, npar + 1)
b = weight * cube.data # (nslice, nlens)
# The linear least-squares fit AX = b could be done directly using
#
# sigspecs = N.array([ N.linalg.lstsq(aa, bb)[0] for aa, bb in zip(A, b) ])
#
# but Alpha = dot(A.T, A) is needed anyway to compute covariance
# matrix Cov = 1 / Alpha. Furthermore, linear resolution
#
# [ N.linalg.solve(aa, bb) for aa, bb in zip(Alpha, Beta) ]
#
    # can be replaced by the faster (~x10) matrix product
#
# [ N.dot(cc, bb) for cc, bb in zip(Cov, Beta) ]
#
# since Cov = 1 / Alpha is readily available.
#
# "Solving Ax = b: inverse vs cholesky factorization" thread
# (http://thread.gmane.org/gmane.comp.python.numeric.general/41365)
# advocates to never invert a matrix directly: that's why we use
# SVD-based inversion SL.pinv2.
# Alpha = N.einsum('...jk,...jl',A,A) # ~x2 slower
Alpha = N.array([N.dot(aa.T, aa) for aa in A]) # (nslice, npar+1, npar+1)
# Beta = N.einsum('...jk,...j',A,b)
Beta = N.array([N.dot(aa.T, bb) for aa, bb in zip(A, b)]) # (nslice, npar+1)
try:
Cov = N.array([SL.pinv2(aa) for aa in Alpha]) # (nslice, npar+1, npar+1)
except SL.LinAlgError:
raise SL.LinAlgError("Singular matrix during spectrum extraction")
# sigspecs & varspecs = nslice x [Star,Sky,[slope_x...]]
sigspecs = N.array([N.dot(cc, bb)
for cc, bb in zip(Cov, Beta)]) # (nslice, npar+1)
varspecs = N.array([N.diag(cc) for cc in Cov]) # (nslice, npar+1)
# Compute the least-square variance using the chi2-case method
# (errors are meaningless in pure least-square case)
if not chi2fit:
weight = N.where(cube.var > 0, cube.var**(-0.5), 0)
A = BF * weight[..., N.newaxis]
Alpha = N.array([N.dot(aa.T, aa) for aa in A])
try:
Cov = N.array([SL.pinv2(aa) for aa in Alpha])
except SL.LinAlgError:
raise SL.LinAlgError("Singular matrix during variance extraction")
varspecs = N.array([N.diag(cc) for cc in Cov])
    # Now, what about negative sky? The problem arises for short exposures,
    # where there's probably no sky whatsoever (except if taken during
    # twilight), and where a (significantly) negative sky is actually
    # a shortcoming of the PSF. For long exposures, one expects "some"
    # negative sky values, where the sky is compatible with 0.
#
# One could also use a NNLS fit to force parameter non-negativity:
#
# [ pySNIFS_fit.fnnls(aa, bb)[0] for aa, bb in zip(Alpha, Beta) ]
#
# *BUT*:
# 1. It is incompatible w/ non-constant sky (since it will force
# all sky coeffs to >0). This can therefore be done only if
# skyDeg=0 (it would otherwise involve optimization with
# constraints on sky positivity).
# 2. There is no easy way to estimate covariance matrix from NNLS
# fit. Since an NNLS fit on a negative sky slice would probably
# always lead to a null sky, an NNLS fit is then equivalent to
# a standard 'PSF' fit without sky.
if skyDeg in (0, -2):
negSky = sigspecs[:, 1] < 0 # Test for presence of negative sky
if negSky.any(): # and 'long' not in psf_fn.name.lower():
print("WARNING: %d slices w/ sky<0 in extract_specs" %
(len(negSky.nonzero()[0])))
print_msg(str(cube.lbda[negSky]), 3, verbosity)
# if 'short' in psf_fn.name:
if False:
# For slices w/ sky<0, fit only PSF without background
Alpha = N.array([N.dot(aa, aa) for aa in A[negSky, :, 0]])
Beta = N.array([N.dot(aa, bb)
for aa, bb in zip(A[negSky, :, 0], b[negSky])])
Cov = 1 / Alpha
sigspecs[negSky, 0] = Cov * Beta # Linear fit without sky
sigspecs[negSky, 1] = 0 # Set sky to null
varspecs[negSky, 0] = Cov
varspecs[negSky, 1] = 0
if method == 'psf':
return cube.lbda, sigspecs, varspecs # PSF extraction
# Reconstruct background and subtract it from cube
bkgnd = N.zeros_like(cube.data)
var_bkgnd = N.zeros_like(cube.var)
if npar_sky:
for d in xrange(1, npar_sky + 1): # Loop over sky components
bkgnd += (BF[:, :, d].T * sigspecs[:, d]).T
var_bkgnd += (BF[:, :, d].T**2 * varspecs[:, d]).T
subData = cube.data - bkgnd # Bkgnd subtraction (nslice, nlens)
subVar = cube.var.copy()
good = cube.var > 0
subVar[good] += var_bkgnd[good] # Variance of bkgnd-sub. signal
# Replace invalid data (var=0) by model PSF = Intensity*PSF
if not good.all():
print_msg("Replacing %d vx with modeled signal" %
len((~good).nonzero()[0]), 1, verbosity)
subData[~good] = (sigspecs[:, 0] * psf.T).T[~good]
# Plain summation over aperture
# Aperture radius in spaxels
aperRad = radius / spxSize
print_msg("Aperture radius: %.2f arcsec = %.2f spx" % (radius, aperRad),
1, verbosity)
# Aperture center after ADR offset from lmid [spx] (nslice,)
x0 = ( psf_param[2] + psf_param[0] *
N.cos(psf_param[1]) * model.ADRscale[:, 0] )
y0 = ( psf_param[3] - psf_param[0] *
N.sin(psf_param[1]) * model.ADRscale[:, 0] )
# Radial distance from center [spx] (nslice, nlens)
r = N.hypot((model.x.T - x0).T, (model.y.T - y0).T)
# Circular aperture (nslice, nlens)
# Use r<aperRad[:,N.newaxis] if radius is a (nslice,) vec.
frac = (r < aperRad).astype('float')
if method == 'subaperture':
# Fractions accounting for subspaxels (a bit slow)
newfrac = subaperture(x0, y0, aperRad, 4)
# Remove bad spaxels since subaperture returns the full spaxel grid
w = (~N.isnan(cube.slice2d(0).ravel())).nonzero()[0]
frac = newfrac[:, w]
# Check if aperture hits the FoV edges
hit = ( ((x0 - aperRad) < -7.5) | ((x0 + aperRad) > 7.5) |
((y0 - aperRad) < -7.5) | ((y0 + aperRad) > 7.5) )
if hit.any():
# Find the closest edge
ld = +(x0 - aperRad + 7.5).min() # Dist. to left edge (<0 if outside)
rd = -(x0 + aperRad - 7.5).max() # Dist. to right edge
bd = +(y0 - aperRad + 7.5).min() # Dist. to bottom edge
td = -(y0 + aperRad - 7.5).max() # Dist. to top edge
cd = -min(ld, rd, bd, td) # Should be positive
ns = int(cd) + 1 # Additional spaxels
print("WARNING: Aperture (r=%.2f spx) hits FoV edges by %.2f spx" %
(aperRad, cd))
if method == 'optimal':
print("WARNING: Model extrapolation outside FoV "
"not implemented for optimal summation.")
elif method == 'subaperture':
print("WARNING: Model extrapolation outside FoV "
"not implemented for sub-aperture summation.")
if hit.any() and method == 'aperture':
# Extrapolate signal from PSF model
print_msg("Signal extrapolation outside FoV...", 1, verbosity)
# Extend usual range by ns spx on each side
nw = 15 + 2 * ns # New FoV size in spaxels
mid = (7 + ns) # FoV center
extRange = N.arange(nw) - mid
extx, exty = N.meshgrid(extRange[::-1], extRange) # nw,nw
extnlens = extx.size # = nlens' = nw**2
print_msg(" Extend FoV by %d spx: nlens=%d -> %d" %
(ns, model.nlens, extnlens), 1, verbosity)
# Compute PSF on extended range (nslice, extnlens)
# Extended model
extModel = psf_fn(psf_ctes, cube, coords=(extx, exty))
extPsf = extModel.comp(param, normed=True) # (nslice, extnlens)
# Embed background-subtracted data in extended model PSF
origData = subData.copy()
origVar = subVar.copy()
# Extended model, (nslice, extnlens)
subData = (sigspecs[:, 0] * extPsf.T).T
subVar = N.zeros((extModel.nslice, extModel.nlens))
for i in xrange(model.nlens):
            # Embed original spx i in the extended model array by finding
            # the corresponding index j in the new array
j, = ((extModel.x[0] == model.x[0, i]) &
(extModel.y[0] == model.y[0, i])).nonzero()
subData[:, j[0]] = origData[:, i]
subVar[:, j[0]] = origVar[:, i]
r = N.hypot((extModel.x.T - x0).T, (extModel.y.T - y0).T)
frac = (r < aperRad).astype('float')
if method.endswith('aperture'):
# Replace signal and variance estimates from plain summation
sigspecs[:, 0] = (frac * subData).sum(axis=1)
varspecs[:, 0] = (frac**2 * subVar).sum(axis=1)
return cube.lbda, sigspecs, varspecs # [Sub]Aperture extraction
if method == 'optimal':
from scipy.ndimage.filters import median_filter
# Model signal = Intensity*PSF + bkgnd
modsig = (sigspecs[:, 0] * psf.T).T + bkgnd # (nslice, nlens)
# One has to have a model of the variance. This can be estimated from
# a simple 'photon noise + RoN' model on each slice: signal ~ alpha*N
# (alpha = 1/flat-field coeff and N = photon counts) and variance ~ (N
# + RoN**2) * alpha**2 = (signal/alpha + RoN**2) * alpha**2 =
# alpha*signal + beta. This model disregards spatial component of
# flat-field, which is supposed to be constant on FoV.
# Model variance = alpha*Signal + beta
coeffs = N.array([polyfit_clip(modsig[s], cube.var[s], 1, clip=5)
for s in xrange(cube.nslice)])
coeffs = median_filter(coeffs, (5, 1)) # A bit of smoothing...
modvar = N.array([N.polyval(coeffs[s], modsig[s])
for s in xrange(cube.nslice)]) # (nslice, nlens)
# Optimal weighting
norm = (frac * psf).sum(axis=1) # PSF norm, nslice
npsf = (psf.T / norm).T # (nslice, nlens)
weight = frac * npsf / modvar # Unormalized weights (nslice, nlens)
norm = (weight * npsf).sum(axis=1) # Weight norm, (nslice,)
weight = (weight.T / norm).T # Normalized weights (nslice, nlens)
# Replace signal and variance estimates from optimal summation
sigspecs[:, 0] = (weight * subData).sum(axis=1)
varspecs[:, 0] = (weight**2 * subVar).sum(axis=1)
return cube.lbda, sigspecs, varspecs # Optimal extraction
# Resampling ========================================================
def subaperture(xc, yc, rc, f=0, nspaxel=15):
"""
Compute aperture fraction for each spaxel with resampling
:param xc: aperture X center
:param yc: aperture Y center
:param rc: aperture radius
:param f: resampling factor (e.g. 3 for 2**3-resampling)
:param nspaxel: spaxel grid side
:return: spaxel flux fraction on original 15x15 grid
"""
from .extern.Arrays import rebin
# Resample spaxel center positions, originally [-7:7]
f = 2**f
epsilon = 0.5 / f
border = nspaxel / 2.
r = N.linspace(-border + epsilon, border - epsilon, nspaxel * f)
x, y = N.meshgrid(r, r) # (x,y) positions of resampled array
frac = N.ones(x.shape) / f**2 # Spaxel fraction
xc = N.atleast_1d(xc)
yc = N.atleast_1d(yc)
assert xc.shape == yc.shape
rc = N.atleast_1d(rc)
if len(rc) == 1: # One single radius?
rc = N.repeat(rc, xc.shape)
out = []
# This loop could possibly be achieved with some higher order matrix
for i, j, k in zip(xc, yc, rc):
fr = frac.copy()
fr[N.hypot(x - i, y - j) > k] = 0. # subspaxels outside circle
# Resample back to original size and sum
out.append(rebin(fr, f).ravel())
return N.array(out)
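# Illustrative sketch (not part of the original module): for an aperture well
# inside the FoV, the per-spaxel fractions should sum to roughly the aperture
# area in spaxel units, assuming `rebin` sums the sub-spaxels:
#   frac = subaperture(0., 0., 2., f=3)   # r=2 spx aperture at FoV center
#   frac.sum()                            # ~ pi * 2**2 ~ 12.57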
# Header information access utilities ===============================
def read_PT(hdr, MK_pressure=616., MK_temp=2.):
"""
Read pressure [mbar] and temperature [C] from hdr (or use default
Mauna-Kea values), and check value consistency.
"""
if hdr is None:
return MK_pressure, MK_temp
pressure = hdr.get('PRESSURE', N.nan)
if not 550 < pressure < 650: # Non-std pressure
print("WARNING: non-std pressure (%.0f mbar) updated to %.0f mbar" % \
(pressure, MK_pressure))
if isinstance(hdr, dict): # pySNIFS.SNIFS_cube.e3d_data_header
hdr['PRESSURE'] = MK_pressure
else: # True pyfits header, add comment
hdr['PRESSURE'] = (MK_pressure, "Default MK pressure [mbar]")
pressure = MK_pressure
temp = hdr.get('TEMP', N.nan)
if not -20 < temp < 20: # Non-std temperature
print("WARNING: non-std temperature (%.0f C) updated to %.0f C" % \
(temp, MK_temp))
if isinstance(hdr, dict): # pySNIFS.SNIFS_cube.e3d_data_header
hdr['TEMP'] = MK_temp
else: # True pyfits header, add comment
hdr['TEMP'] = (MK_temp, "Default MK temperature [C]")
temp = MK_temp
return pressure, temp
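# Illustrative sketch (hypothetical header values): in-range values pass
# through unchanged, out-of-range values are reset to the Mauna-Kea defaults.
#   read_PT({'PRESSURE': 620., 'TEMP': 5.})    # -> (620.0, 5.0)
#   read_PT({'PRESSURE': 100., 'TEMP': 5.})    # warns, -> (616.0, 5.0)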
def read_psf(hdr):
"""Return PSF class as read (or guessed) from header."""
assert hdr['ES_METH'] == 'psf', \
"PSF reconstruction only works for PSF spectro-photometry"
try:
psfname = hdr['ES_PSF']
except KeyError:
efftime = hdr['EFFTIME']
print("WARNING: cannot read 'ES_PSF' keyword, " \
"guessing from EFFTIME=%.0fs" % efftime)
        # Assume it's a 'classic' PSF model (i.e. 'long' or 'short')
psfname = 'long' if efftime > 12. else 'short'
try:
psfname, psfmodel = psfname.split(', ') # "name, model"
except ValueError:
# Chromatic PSF: 'short|long blue|red'
if len(psfname.split()) == 2:
psfmodel = 'chromatic'
else: # Classic PSF: 'short|long'
psfmodel = 'classic'
# Convert PSF name (e.g. 'short red') to PSF class name
# ('ShortRed_ExposurePSF')
fnname = ''.join(map(str.capitalize, psfname.split())) + '_ExposurePSF'
psffn = eval(fnname)
if psfmodel.endswith('powerlaw'):
psffn.model = psffn.model + '-powerlaw'
try:
subsampling = hdr['ES_SUB']
except KeyError:
subsampling = 1
psffn.subsampling = subsampling
print("PSF name/model: %s/%s [%s], sub x%d" % \
(psfname, psfmodel, fnname, subsampling))
return psffn
def read_psf_ctes(hdr):
"""Read PSF constants [lmid,alphaDeg,ellDeg] from header."""
lmin = hdr['ES_LMIN']
lmax = hdr['ES_LMAX']
lmid = (lmin + lmax) / 2.
# Count up alpha/ell coefficients (ES_Ann/ES_Enn) to get the
# polynomial degrees
countKeys = ( lambda regexp:
len([k for k in hdr.keys() if re.match(regexp, k)]) )
adeg = countKeys('ES_A\d+$') - 1
edeg = countKeys('ES_E\d+$') - 1
print("PSF constants: lMid=%.2f A, alphaDeg=%d, ellDeg=%d" % \
(lmid, adeg, edeg))
return [lmid, adeg, edeg]
def read_psf_param(hdr):
"""
Read (7+ellDeg+alphaDeg) PSF parameters from header: delta, theta,
xc, yc, xy, e0, ..., en, a0, ..., an.
"""
# Chromatic expansion coefficients
c_ell = [v for k, v in hdr.items() if re.match('ES_E\d+$', k)]
c_alp = [v for k, v in hdr.items() if re.match('ES_A\d+$', k)]
lmin = hdr['ES_LMIN']
lmax = hdr['ES_LMAX']
lmid = (lmin + lmax) / 2. # Middle wavelength [A]
lref = hdr['ES_LREF'] # Reference wavelength [A]
# Convert public polynomial coeffs from lr~ = lambda/LbdaRef - 1 =
# a+b*lr back to internal lr = (2*lambda -
# (lmin+lmax))/(lmax-lmin)
a = (lmin + lmax) / (2. * lref) - 1
b = (lmax - lmin) / (2. * lref)
ecoeffs = polyConvert(c_ell, trans=(a, b), backward=True).tolist()
if 'powerlaw' not in hdr['ES_PSF']:
acoeffs = polyConvert(c_alp, trans=(a, b), backward=True).tolist()
else: # Not needed for powerlaw expansion
acoeffs = c_alp
xref = hdr['ES_XC'] # Reference position [spx] at ref. wavelength
yref = hdr['ES_YC']
try:
xy = hdr['ES_XY'] # xy parameter
except KeyError:
xy = hdr['ES_PA'] # Old name
# This reproduces exactly the PSF parameters used by
# extract_specs(full_cube...)
pressure, temp = read_PT(hdr)
airmass = hdr['ES_AIRM'] # Effective airmass
parang = hdr['ES_PARAN'] # Effective parallactic angle [deg]
adr = TA.ADR(pressure, temp, lref=lmid, airmass=airmass, parangle=parang)
xmid, ymid = adr.refract(
xref, yref, lref, unit=SpxSize, backward=True) # [spx]
print("PSF parameters: airmass=%.3f, parangle=%.1f deg, " \
"refpos=%.2fx%.2f spx @%.2f A" % (airmass, parang, xmid, ymid, lmid))
return [adr.delta, adr.theta, xmid, ymid, xy] + ecoeffs + acoeffs
def estimate_zdpar(inhdr):
"""
Estimate zenithal distance [deg] and parallactic angle [deg] from
header.
"""
from .extern import Coords
ha, dec = Coords.altaz2hadec(inhdr['ALTITUDE'], inhdr['AZIMUTH'],
phi=inhdr['LATITUDE'], deg=True)
zd, parangle = Coords.hadec2zdpar(ha, dec,
phi=inhdr['LATITUDE'], deg=True)
return zd, parangle # [deg]
def read_DDTpos(inhdr):
"""
Read reference wavelength and DDT-estimated position from DDTLREF and
DDT[X|Y]P keywords. Will raise KeyError if keywords are not available.
"""
try:
lddt = inhdr['DDTLREF'] # Ref. wavelength [A]
xddt = inhdr['DDTXP'] # Predicted position [spx]
yddt = inhdr['DDTYP']
except KeyError as err:
raise KeyError("File has no DDT-related keywords (%s)" % err)
# Some sanity check
if not (abs(xddt) < 7 and abs(yddt) < 7):
raise KeyError(
"Invalid DDT position: %.2f x %.2f is outside FoV" % (xddt, yddt))
return lddt, xddt, yddt
def read_cubefit_pos(inhdr, refwave=5000.):
"""
Read reference wavelength and cubefit-estimated position from CBFT_SN[X|Y]
keywords and associated comments. Will raise KeyError if keywords are not
available.
"""
import re
try:
xcf = inhdr['CBFT_SNX']
ycf = inhdr['CBFT_SNY']
except KeyError as err:
raise KeyError("File has no cubefit-related keywords (%s)" % err)
# Some sanity check
if not (abs(xcf) < 7 and abs(ycf) < 7):
raise KeyError(
"Invalid cubefit position: %.2f x %.2f is outside FoV" % (xcf, ycf))
# Get reference wavelength from comment
pattern = "SN x offset from center at (\d+) A \[spaxels\]"
try:
comment = inhdr.comments['CBFT_SNX'] # Read comment
lcf = float(re.match(pattern, comment).group(1)) # Look for ref. wave
except (KeyError, AttributeError): # No comment or no match: use default refwave
lcf = refwave
except ValueError: # Match a non-float value
raise KeyError(
"Invalid cubefit CBFT_SNX comment: '%s'" % comment)
return lcf, xcf, ycf
# Polynomial utilities ======================================================
def polyEval(coeffs, x):
"""
    Evaluate polynomial sum_i ci*x**i at x. It uses the 'natural' convention
    for polynomial coeffs: [c0, c1, ..., cn] (opposite of N.polyfit).
"""
if N.isscalar(x):
y = 0 # Faster on scalar
for i, c in enumerate(coeffs):
# Incremental computation of x**i is only slightly faster
y += c * x**i
else: # Faster on arrays
y = N.polyval(coeffs[::-1], x) # Beware coeffs order!
return y
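# Illustrative sketch: with the 'natural' coefficient order [c0, c1, c2],
#   polyEval([1., 2., 3.], 2.)   # 1 + 2*2 + 3*2**2 = 17.0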
def polyConvMatrix(n, trans=(0, 1)):
"""
Return the upper triangular matrix (i,k) * b**k * a**(i-k), that
converts polynomial coeffs for x~:=a+b*x (P~ = a0~ + a1~*x~ +
a2~*x~**2 + ...) in polynomial coeffs for x (P = a0 + a1*x +
a2*x**2 + ...). Therefore, (a,b)=(0,1) gives identity.
"""
from scipy.misc import comb
a, b = trans
m = N.zeros((n, n), dtype='d')
for r in range(n):
for c in range(r, n):
m[r, c] = comb(c, r) * b**r * a**(c - r)
return m
def polyConvert(coeffs, trans=(0, 1), backward=False):
"""
    Convert polynomial coeffs for x (P = a0 + a1*x + a2*x**2 + ...) into
    polynomial coeffs for x~:=a+b*x (P~ = a0~ + a1~*x~ + a2~*x~**2 +
    ...). Therefore, (a,b)=(0,1) is the identity. If backward, perform the
    opposite transformation.
Note: backward transformation could be done using more general
polynomial composition `polyval`, but forward transformation is a
long standing issue in the general case (look for functional
decomposition of univariate polynomial).
"""
a, b = trans
if not backward:
a = -float(a) / float(b)
b = 1 / float(b)
return N.dot(polyConvMatrix(len(coeffs), (a, b)), coeffs)
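# Illustrative sketch of the defining identity (hypothetical numbers):
# P~ := polyConvert(P, trans=(a, b)) satisfies P~(a + b*x) == P(x), e.g.
#   c = polyConvert([1., 2., 3.], trans=(1., 2.))
#   polyEval(c, 1. + 2. * 0.5) == polyEval([1., 2., 3.], 0.5)   # True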
def polyfit_clip(x, y, deg, clip=3, nitermax=10):
"""
Least squares polynomial fit with sigma-clipping (if
clip>0). Returns polynomial coeffs w/ same convention as
N.polyfit: [cn, ..., c1, c0].
"""
good = N.ones(y.shape, dtype='bool')
niter = 0
while True:
niter += 1
coeffs = N.polyfit(x[good], y[good], deg)
old = good
if clip:
dy = N.polyval(coeffs, x) - y
good = N.absolute(dy) < clip * N.std(dy)
if (good == old).all():
break # No more changes, stop there
if niter > nitermax: # Max. # of iter, stop there
print("polyfit_clip reached max. # of iterations: " \
"deg=%d, clip=%.2f x %f, %d px removed" % \
(deg, clip, N.std(dy), len((~old).nonzero()[0])))
break
if y[good].size <= deg + 1:
raise ValueError("polyfit_clip: Not enough points left (%d) "
"for degree %d" % (y[good].size, deg))
return coeffs
def chebNorm(x, xmin, xmax):
"""Normalization [xmin,xmax] to [-1,1]"""
if xmin != xmax:
return (2 * x - (xmax + xmin)) / (xmax - xmin)
elif x == xmin:
return N.zeros_like(x)
else:
raise ValueError("Invalid Chebychev normalization.")
def chebEval(pars, nx, chebpolys=[]):
"""
Orthogonal Chebychev polynomial expansion, x should be already
normalized in [-1,1].
"""
from scipy.special import chebyu
if len(chebpolys) < len(pars):
print("Initializing Chebychev polynomials up to order %d" % len(pars))
chebpolys[:] = [chebyu(i) for i in range(len(pars))]
return N.sum([par * cheb(nx) for par, cheb in zip(pars, chebpolys)], axis=0)
def powerLawEval(coeffs, x):
"""
Evaluate (curved) power-law: coeffs[-1] * x**(coeffs[-2] +
coeffs[-3]*(x-1) + ...)
    Note that f(1) = coeffs[-1] = alpha(lref) with x = lbda/lref.
"""
return coeffs[-1] * x**N.polyval(coeffs[:-1], x - 1)
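# Illustrative sketch (hypothetical coefficients): at x = lbda/lref = 1 the
# curved power-law reduces to its last coefficient,
#   powerLawEval([-0.2, 2.4], 1.)   # -> 2.4, i.e. alpha(lref)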
def powerLawJac(coeffs, x):
ncoeffs = len(coeffs) # M
jac = N.empty((ncoeffs, len(x)), dtype=x.dtype) # M×N
jac[-1] = x**N.polyval(coeffs[:-1], x - 1) # df/dcoeffs[-1]
jac[-2] = coeffs[-1] * jac[-1] * N.log(x) # df/dcoeffs[-2]
for i in range(-3, -ncoeffs - 1, -1):
jac[i] = jac[i + 1] * (x - 1)
return jac # M×N
def powerLawFit(x, y, deg=2, guess=None):
from .extern import Optimizer as TO
if guess is None:
guess = [0.] * (deg - 1) + [-1., 2.]
else:
assert len(guess) == (deg + 1)
model = TO.Model(powerLawEval, jac=powerLawJac)
data = TO.DataSet(y, x=x)
fit = TO.Fitter(model, data)
lsqPars, msg = SO.leastsq(fit.residuals, guess, args=(x,))
if msg <= 4:
return lsqPars
else:
raise ValueError("powerLawFit did not converge")
# Ellipse utilities ==============================
def quadEllipse(a, b, c, d, f, g):
"""
Ellipse elements (center, semi-axes and PA) from the general
quadratic curve a*x2 + 2*b*x*y + c*y2 + 2*d*x + 2*f*y + g = 0.
http://mathworld.wolfram.com/Ellipse.html
"""
D = N.linalg.det([[a, b, d], [b, c, f], [d, f, g]])
J = N.linalg.det([[a, b], [b, c]])
I = a + c
if not (D != 0 and J > 0 and D / I < 0):
# raise ValueError("Input quadratic curve does not correspond to "
# "an ellipse: D=%f!=0, J=%f>0, D/I=%f<0" % (D,J,D/I))
return 0, 0, -1, -1, 0
elif a == c and b == 0:
# raise ValueError("Input quadratic curve correspond to a circle")
pass
b2mac = b**2 - a*c
# Center of the ellipse
x0 = (c*d - b*f) / b2mac
y0 = (a*f - b*d) / b2mac
    # Semi-axis lengths
ap = N.sqrt(2 * (a * f**2 + c * d**2 + g * b**2 - 2*b*d*f - a*c*g) /
(b2mac * (N.sqrt((a - c)**2 + 4 * b**2) - (a + c))))
bp = N.sqrt(2 * (a * f**2 + c * d**2 + g * b**2 - 2*b*d*f - a*c*g) /
(b2mac * (-N.sqrt((a - c)**2 + 4 * b**2) - (a + c))))
    # Position angle: phi = 1/2 * acot((a - c) / (2b)) (cf. MathWorld),
    # using acot(x) = pi/2 - atan(x), with range (0, pi)
    if b == 0:
        phi = 0
    else:
        phi = (N.pi / 2 - N.arctan((a - c) / (2 * b))) / 2
    if a > c:
        phi += N.pi / 2
return x0, y0, ap, bp, phi
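# Illustrative sketch: the unit circle x**2 + y**2 - 1 = 0 is recovered as a
# degenerate ellipse centered on the origin with unit semi-axes and PA=0,
#   quadEllipse(1, 0, 1, 0, 0, -1)   # -> (0, 0, 1.0, 1.0, 0)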
def flatAndPA(cy2, c2xy):
"""
Return flattening q=b/a and position angle PA [deg] for ellipse
defined by x**2 + cy2*y**2 + 2*c2xy*x*y = 1.
"""
x0, y0, a, b, phi = quadEllipse(1, c2xy, cy2, 0, 0, -1)
assert a > 0 and b > 0, "Input equation does not correspond to an ellipse"
q = b / a # Flattening
pa = phi * TA.RAD2DEG # From rad to deg
return q, pa
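# Illustrative sketch: a circle (cy2=1, c2xy=0) has flattening q=1 and PA=0,
#   flatAndPA(1., 0.)   # -> (1.0, 0.0)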
# PSF classes ================================================================
class ExposurePSF:
"""
Empirical PSF-3D function used by the `model` class.
Note that the so-called `PA` or `xy` parameter is *not* the PA of
the adjusted ellipse, but half the x*y coefficient. Similarly,
'ell' is not the ellipticity, but the y**2 coefficient: x2 +
ell*y2 + 2*xy*x*y + ... = 0. See `quadEllipse`/`flatAndPA` for
conversion routines.
"""
subsampling = 1 # No subsampling by default
def __init__(self, psf_ctes, cube, coords=None):
"""
        Initialize the class.
psf_ctes: Internal parameters (pixel size in cube spatial unit,
reference wavelength and polynomial degrees).
cube: Input cube. This is a `SNIFS_cube` object.
coords: if not None, should be (x,y).
"""
self.spxSize = psf_ctes[0] # Spaxel size [arcsec]
self.lmid = psf_ctes[1] # Reference wavelength [AA]
self.alphaDeg = int(psf_ctes[2]) # Alpha polynomial degree
self.ellDeg = int(psf_ctes[3]) # y**2 (aka 'Ell') polynomial degree
self.npar_cor = 7 + self.ellDeg + self.alphaDeg # PSF parameters
self.npar_ind = 1 # Intensity parameters per slice
self.nslice = cube.nslice
self.npar = self.npar_cor + self.npar_ind * self.nslice
# Name of PSF parameters
self.parnames = ( ['delta', 'theta', 'xc', 'yc', 'xy'] +
['e%d' % i for i in range(self.ellDeg + 1)] +
['a%d' % i for i in range(self.alphaDeg + 1)] +
['i%02d' % (i + 1) for i in range(self.nslice)] )
# Spaxel coordinates [spx]
if coords is None:
self.nlens = cube.nlens
self.x = N.resize(cube.x, (self.nslice, self.nlens))
self.y = N.resize(cube.y, (self.nslice, self.nlens))
else:
x = coords[0].ravel()
y = coords[1].ravel()
assert len(x) == len(y), \
"Incompatible coordinates (%d/%d)" % (len(x), len(y))
self.nlens = len(x)
self.x = N.resize(x, (self.nslice, self.nlens))
self.y = N.resize(y, (self.nslice, self.nlens))
self.l = N.resize(cube.lbda, (self.nlens, self.nslice)).T
if self.nslice > 1:
self.lmin = cube.lstart
self.lmax = cube.lend
self.lrel = chebNorm(self.l, self.lmin, self.lmax) # From -1 to +1
else:
self.lmin, self.lmax = -1, +1
self.lrel = self.l
# ADR in spaxels (nslice, nlens)
if hasattr(cube, 'e3d_data_header'): # Read from cube if possible
pressure, temp = read_PT(cube.e3d_data_header)
else:
pressure, temp = read_PT(None) # Get default values for P and T
self.ADRscale = TA.ADR(
P=pressure, T=temp, lref=self.lmid).get_scale(self.l) / self.spxSize
# Sub-sampling grid: decompose the spaxels into n×n sub-spaxels
eps = N.linspace(-0.5, +0.5, self.subsampling * 2 + 1)[1::2]
# Offsets from center
self.subgrid = tuple(itertools.product(eps, eps))
def comp(self, param, normed=False):
"""
Compute the function.
param: Input parameters for the PSF model:
- param[0:7+n+m]: parameters of the PSF shape
- param[0,1]: Atmospheric dispersion power and parall. angle [rad]
- param[2,3]: X,Y position at reference wavelength
- param[4]: xy parameter
- param[5:6+n]: Ellipticity param. expansion (n+1: # of coeffs)
- param[6+n:7+n+m]: Moffat scale alpha expansion (m+1: # of coeffs)
- param[7+m+n:]: Intensity parameters (one for each slice in the cube)
normed: Should the function be normalized (integral)
"""
self.param = N.asarray(param)
# ADR params
delta = self.param[0]
theta = self.param[1]
xc = self.param[2] # Position at lmid
yc = self.param[3]
# Position at current wavelength
x0 = xc + delta * N.sin(theta) * self.ADRscale # (nslice, nlens)
y0 = yc - delta * N.cos(theta) * self.ADRscale
# Other params
xy = self.param[4]
ellCoeffs = self.param[5:6 + self.ellDeg]
alphaCoeffs = self.param[6 + self.ellDeg:self.npar_cor]
ell = polyEval(ellCoeffs, self.lrel) # (nslice, nlens)
if not self.model.endswith('powerlaw'):
alpha = polyEval(alphaCoeffs, self.lrel)
else:
alpha = powerLawEval(alphaCoeffs, self.l / LbdaRef)
# PSF model
if self.model == 'chromatic': # Includes chromatic correlations
lcheb = chebNorm(self.l, *self.chebRange)
b0 = chebEval(self.beta0, lcheb)
b1 = chebEval(self.beta1, lcheb)
s0 = chebEval(self.sigma0, lcheb)
s1 = chebEval(self.sigma1, lcheb)
e0 = chebEval(self.eta0, lcheb)
e1 = chebEval(self.eta1, lcheb)
else: # Achromatic correlations
b0 = self.beta0
b1 = self.beta1
s0 = self.sigma0
s1 = self.sigma1
e0 = self.eta0
e1 = self.eta1
sigma = s0 + s1 * alpha
beta = b0 + b1 * alpha
eta = e0 + e1 * alpha
val = 0.
for epsx, epsy in self.subgrid:
# Gaussian + Moffat
dx = self.x - x0 + epsx # Center of sub-spaxel
dy = self.y - y0 + epsy
# CAUTION: ell & PA are not the true ellipticity and position angle!
r2 = dx**2 + ell * dy**2 + 2 * xy * dx * dy
gaussian = N.exp(-0.5 * r2 / sigma**2)
moffat = (1 + r2 / alpha**2)**(-beta)
# Function
val += moffat + eta * gaussian
val *= self.param[self.npar_cor:, N.newaxis] / self.subsampling**2
# The 3D psf model is not normalized to 1 in integral. The result must
# be renormalized by (2*eta*sigma**2 + alpha**2/(beta-1)) *
# N.pi/sqrt(ell - xy**2)
if normed:
val /= ( N.pi / N.sqrt(ell - xy**2) *
(2 * eta * sigma**2 + alpha**2 / (beta - 1)) )
return val
def deriv(self, param):
"""
Compute the derivative of the function with respect to its parameters.
        param: Input parameters of the PSF model.
               A list of numbers (see `SNIFS_psf_3D.comp`).
"""
self.param = N.asarray(param)
# ADR params
delta = self.param[0]
theta = self.param[1]
xc = self.param[2]
yc = self.param[3]
costheta = N.cos(theta)
sintheta = N.sin(theta)
x0 = xc + delta * sintheta * self.ADRscale # (nslice, nlens)
y0 = yc - delta * costheta * self.ADRscale
# Other params
xy = self.param[4]
ellCoeffs = self.param[5:6 + self.ellDeg]
alphaCoeffs = self.param[6 + self.ellDeg:self.npar_cor]
ell = polyEval(ellCoeffs, self.lrel)
if not self.model.endswith('powerlaw'):
alpha = polyEval(alphaCoeffs, self.lrel)
else:
alpha = powerLawEval(alphaCoeffs, self.l / LbdaRef)
# PSF model
if self.model == 'chromatic': # Includes chromatic correlations
lcheb = chebNorm(self.l, *self.chebRange)
b0 = chebEval(self.beta0, lcheb)
b1 = chebEval(self.beta1, lcheb)
s0 = chebEval(self.sigma0, lcheb)
s1 = chebEval(self.sigma1, lcheb)
e0 = chebEval(self.eta0, lcheb)
e1 = chebEval(self.eta1, lcheb)
else: # Achromatic correlations
b0 = self.beta0
b1 = self.beta1
s0 = self.sigma0
s1 = self.sigma1
e0 = self.eta0
e1 = self.eta1
sigma = s0 + s1 * alpha
beta = b0 + b1 * alpha
eta = e0 + e1 * alpha
totgrad = N.zeros(
(self.npar_cor + self.npar_ind,) + self.x.shape, 'd')
for epsx, epsy in self.subgrid:
# Gaussian + Moffat
dx = self.x - x0 + epsx
dy = self.y - y0 + epsy
dy2 = dy**2
r2 = dx**2 + ell * dy2 + 2 * xy * dx * dy
sigma2 = sigma**2
gaussian = N.exp(-0.5 * r2 / sigma2)
alpha2 = alpha**2
ea = 1 + r2 / alpha2
moffat = ea**(-beta)
# Derivatives
grad = N.zeros(
(self.npar_cor + self.npar_ind,) + self.x.shape, 'd')
j1 = eta / sigma2
j2 = 2 * beta / ea / alpha2
tmp = gaussian * j1 + moffat * j2
grad[2] = tmp * (dx + xy * dy) # dPSF/dxc
grad[3] = tmp * (ell * dy + xy * dx) # dPSF/dyc
grad[0] = ( self.ADRscale *
(sintheta * grad[2] - costheta * grad[3]) )
grad[1] = ( delta * self.ADRscale *
(sintheta * grad[3] + costheta * grad[2]) )
grad[4] = -tmp * dx * dy # dPSF/dxy
for i in xrange(self.ellDeg + 1): # dPSF/dei
grad[5 + i] = -tmp / 2 * dy2 * self.lrel**i
# dPSF/dalpha
dalpha = ( gaussian * (e1 + s1 * r2 * j1 / sigma) +
moffat * (-b1 * N.log(ea) + r2 * j2 / alpha) )
if not self.model.endswith('powerlaw'):
for i in xrange(self.alphaDeg + 1): # dPSF/dai, i=<0,alphaDeg>
grad[6 + self.ellDeg + i] = dalpha * self.lrel**i
else:
lrel = self.l / LbdaRef
imax = 6 + self.ellDeg + self.alphaDeg
grad[imax] = ( dalpha *
lrel**N.polyval(alphaCoeffs[:-1], lrel - 1) )
if self.alphaDeg:
grad[imax - 1] = grad[imax] * alphaCoeffs[-1] * N.log(lrel)
for i in range(imax - 2, imax - self.alphaDeg - 1, -1):
# dPSF/dai, i=0..alphaDeg
grad[i] = grad[i + 1] * (lrel - 1)
grad[self.npar_cor] = moffat + eta * gaussian # dPSF/dI
totgrad += grad
totgrad[:self.npar_cor] *= self.param[N.newaxis,
self.npar_cor:, N.newaxis]
totgrad /= self.subsampling**2
return totgrad
def _HWHM_fn(self, r, alphaCoeffs, lbda):
"""Half-width at half maximum function (=0 at HWHM)."""
if not self.model.endswith('powerlaw'):
alpha = polyEval(alphaCoeffs, chebNorm(lbda, self.lmin, self.lmax))
else:
alpha = powerLawEval(alphaCoeffs, lbda / LbdaRef)
if self.model == 'chromatic':
lcheb = chebNorm(lbda, *self.chebRange)
b0 = chebEval(self.beta0, lcheb)
b1 = chebEval(self.beta1, lcheb)
s0 = chebEval(self.sigma0, lcheb)
s1 = chebEval(self.sigma1, lcheb)
e0 = chebEval(self.eta0, lcheb)
e1 = chebEval(self.eta1, lcheb)
else:
b0 = self.beta0
b1 = self.beta1
s0 = self.sigma0
s1 = self.sigma1
e0 = self.eta0
e1 = self.eta1
sigma = s0 + s1 * alpha
beta = b0 + b1 * alpha
eta = e0 + e1 * alpha
gaussian = N.exp(-0.5 * r**2 / sigma**2)
moffat = (1 + r**2 / alpha**2)**(-beta)
# PSF=moffat + eta*gaussian, maximum is 1+eta
return moffat + eta * gaussian - (eta + 1) / 2
def FWHM(self, param, lbda):
"""Estimate FWHM of PSF at wavelength lbda."""
alphaCoeffs = param[6 + self.ellDeg:self.npar_cor]
# Compute FWHM from radial profile
fwhm = 2 * SO.fsolve(
func=self._HWHM_fn, x0=1., args=(alphaCoeffs, lbda))
# Beware: scipy-0.8.0 fsolve returns a size 1 array
return N.squeeze(fwhm) # In spaxels
@classmethod
def seeing_powerlaw(cls, lbda, alphaCoeffs):
"""
Estimate power-law chromatic model seeing FWHM [arcsec] at
wavelength `lbda` for alpha coefficients `alphaCoeffs`.
"""
def hwhm(r, alphaCoeffs, lbda):
alpha = powerLawEval(alphaCoeffs, lbda / LbdaRef)
sigma = cls.sigma0 + cls.sigma1 * alpha
beta = cls.beta0 + cls.beta1 * alpha
eta = cls.eta0 + cls.eta1 * alpha
gaussian = N.exp(-0.5 * r**2 / sigma**2)
moffat = (1 + r**2 / alpha**2)**(-beta)
# PSF=moffat + eta*gaussian, maximum is 1+eta
return moffat + eta * gaussian - (eta + 1) / 2
# Compute FWHM from radial profile [spx]
seeing = 2 * SO.fsolve(func=hwhm, x0=1., args=(alphaCoeffs, lbda))
return N.squeeze(seeing) * SpxSize # Spx → arcsec
class Long_ExposurePSF(ExposurePSF):
"""Classic PSF model (achromatic correlations) for long exposures."""
name = 'long'
model = 'classic'
beta0 = 1.685
beta1 = 0.345
sigma0 = 0.545
sigma1 = 0.215
eta0 = 1.04
eta1 = 0.00
class Short_ExposurePSF(ExposurePSF):
"""
Classic PSF model (achromatic correlations) for short exposures.
"""
name = 'short'
model = 'classic'
beta0 = 1.395
beta1 = 0.415
sigma0 = 0.56
sigma1 = 0.2
eta0 = 0.6
eta1 = 0.16
class LongBlue_ExposurePSF(ExposurePSF):
"""
PSF model with chromatic correlations (2nd order Chebychev
polynomial) for long, blue exposures.
"""
name = 'long blue'
model = 'chromatic'
chebRange = (3399., 5100.) # Domain of validity of Chebychev expansion
beta0 = [1.220, 0.016, -0.056] # b00,b01,b02
beta1 = [0.590, 0.004, 0.014] # b10,b11,b12
sigma0 = [0.710, -0.024, 0.016] # s00,s01,s02
sigma1 = [0.119, 0.001, -0.004] # s10,s11,s12
eta0 = [0.544, -0.090, 0.039] # e00,e01,e02
eta1 = [0.223, 0.060, -0.020] # e10,e11,e12
class LongRed_ExposurePSF(ExposurePSF):
"""
PSF model with chromatic correlations (2nd order Chebychev
polynomial) for long, red exposures.
"""
name = 'long red'
model = 'chromatic'
chebRange = (5318., 9508.) # Domain of validity of Chebychev expansion
beta0 = [1.205, -0.100, -0.031] # b00,b01,b02
beta1 = [0.578, 0.062, 0.028] # b10,b11,b12
sigma0 = [0.596, 0.044, 0.011] # s00,s01,s02
sigma1 = [0.173, -0.035, -0.008] # s10,s11,s12
eta0 = [1.366, -0.184, -0.126] # e00,e01,e02
eta1 = [-0.134, 0.121, 0.054] # e10,e11,e12
class ShortBlue_ExposurePSF(ExposurePSF):
"""
PSF model with chromatic correlations (2nd order Chebychev
polynomial) for short, blue exposures.
"""
name = 'short blue'
model = 'chromatic'
chebRange = (3399., 5100.) # Domain of validity of Chebychev expansion
beta0 = [1.355, 0.023, -0.042] # b00,b01,b02
beta1 = [0.524, -0.012, 0.020] # b10,b11,b12
sigma0 = [0.492, -0.037, 0.000] # s00,s01,s02
sigma1 = [0.176, 0.016, 0.000] # s10,s11,s12
eta0 = [0.499, 0.080, 0.061] # e00,e01,e02
eta1 = [0.316, -0.015, -0.050] # e10,e11,e12
class ShortRed_ExposurePSF(ExposurePSF):
"""
PSF model with chromatic correlations (2nd order Chebychev
polynomial) for short, red exposures.
"""
name = 'short red'
model = 'chromatic'
chebRange = (5318., 9508.) # Domain of validity of Chebychev expansion
beta0 = [1.350, -0.030, -0.012] # b00,b01,b02
beta1 = [0.496, 0.032, 0.020] # b10,b11,b12
sigma0 = [0.405, -0.003, 0.000] # s00,s01,s02
sigma1 = [0.212, -0.017, 0.000] # s10,s11,s12
eta0 = [0.704, -0.060, 0.044] # e00,e01,e02
eta1 = [0.343, 0.113, -0.045] # e10,e11,e12
class Hyper_PSF3D_PL(object):
"""
Hyper-term to be added to 3D-PSF fit: priors on ADR parameters,
alpha power-law chromatic expansion, PSF shape parameters and
point-source position.
"""
positionAccuracy = 0.2 # Loose position prior accuracy [spx]
def __init__(self, psf_ctes, inhdr, seeing=None, position=None,
scale=1., verbose=False):
alphaDeg = psf_ctes[2] # Alpha expansion degree
ellDeg = psf_ctes[3] # Ellipticity expansion degree
if alphaDeg != 2:
raise NotImplementedError("Hyper-term trained for alphaDeg=2 only")
if ellDeg != 0:
raise NotImplementedError("Hyper-term trained for ellDeg=0 only")
self.alphaSlice = slice(6 + ellDeg, 7 + ellDeg + alphaDeg)
self.X = inhdr['CHANNEL'][0].upper() # 'B' or 'R'
if self.X not in ('B', 'R'):
raise KeyError("Unknown channel '%s'" % inhdr['CHANNEL'])
self.scale = scale # Global hyper-scaling
# Compute predictions and associated accuracy
self._predict_ADR(inhdr, verbose=verbose) # ADR parameters
self._predict_shape(inhdr, verbose=verbose) # Shape (xy & y2) params
self._predict_PL(seeing, verbose=verbose) # Power-law expansion coeffs
# Position at ref. wavelength
self._predict_pos(position, verbose=verbose)
@classmethod
def predict_adr_params(cls, inhdr):
"""
Predict ADR parameters delta and theta [rad] from header `inhdr`
including standard keywords `AIRMASS`, `PARANG` (parallactic
angle [deg]), and `CHANNEL`.
"""
# 0th-order estimates
delta0 = N.tan(N.arccos(1. / inhdr['AIRMASS']))
theta0 = inhdr['PARANG'] / TA.RAD2DEG # Parallactic angle [rad]
# 1st-order corrections from ad-hoc linear regressions
sinpar = N.sin(theta0)
cospar = N.cos(theta0)
X = inhdr['CHANNEL'][0].upper() # 'B' or 'R'
if X == 'B': # Blue
ddelta1 = -0.00734 * sinpar + 0.00766
dtheta1 = -0.554 * cospar + 3.027 # [deg]
elif X == 'R': # Red
ddelta1 = +0.04674 * sinpar + 0.00075
dtheta1 = +3.078 * cospar + 4.447 # [deg]
else:
raise KeyError("Unknown channel '%s'" % inhdr['CHANNEL'])
# Final predictions
delta = delta0 + ddelta1
theta = theta0 + dtheta1 / TA.RAD2DEG # [rad]
return delta, theta
@classmethod
def predict_alpha_coeffs(cls, seeing, channel):
"""
Predict power-law expansion alpha coefficients from seeing for
given channel.
"""
if channel == 'B':
coeffs = N.array([
-0.134 * seeing + 0.5720, # p0
-0.134 * seeing - 0.0913, # p1
+3.474 * seeing - 1.3880]) # p2
elif channel == 'R':
coeffs = N.array([
-0.0777 * seeing + 0.1741, # p0
-0.0202 * seeing - 0.3434, # p1
+3.4000 * seeing - 1.352]) # p2
else:
raise KeyError("Unknown channel '%s'" % channel)
return coeffs
@classmethod
def predict_y2_param(cls, inhdr):
"""Predict shape parameter y2."""
# Ad-hoc linear regressions
airmass = inhdr['AIRMASS']
X = inhdr['CHANNEL'][0].upper() # 'B' or 'R'
if X == 'B': # Blue
y2 = -0.323 * airmass + 1.730
elif X == 'R': # Red
y2 = -0.442 * airmass + 1.934
else:
raise KeyError("Unknown channel '%s'" % inhdr['CHANNEL'])
return y2
def _predict_ADR(self, inhdr, verbose=False):
"""
Predict ADR parameters delta,theta and prediction accuracy
ddelta,dtheta, for use in hyper-term computation. 1st-order
corrections and model dispersions were obtained from faint
standard star ad-hoc analysis (`adr.py` and `runaway.py`).
"""
self.delta, self.theta = self.predict_adr_params(inhdr)
# Final model dispersion
if self.X == 'B': # Blue
self.ddelta = 0.0173
self.dtheta = 1.651 # [deg]
else: # Red
self.ddelta = 0.0122
self.dtheta = 1.453 # [deg]
self.dtheta /= TA.RAD2DEG # [rad]
if verbose:
print("ADR parameter predictions:")
print(" Header: δ=% .2f, θ=%+.2f°" % \
(N.tan(N.arccos(1. / inhdr['AIRMASS'])),
inhdr['PARANG'] / TA.RAD2DEG))
print(" Parameters: δ=% .2f, θ=%+.2f°" % \
(self.delta, self.theta * TA.RAD2DEG))
print(" dParam: Δδ=% .2f, Δθ=% .2f°" % \
(self.ddelta, self.dtheta * TA.RAD2DEG))
def _predict_PL(self, seeing, verbose=False):
"""
        Predict alpha power-law parameters {p_i} and the prediction
        precision matrix cov^{-1}({p_i}), for use in hyper-term
computation. 1st-order corrections and model dispersions were
obtained from faint standard star ad-hoc analysis (`adr.py`
and `runaway.py`).
"""
if seeing is None:
self.plpars = None # No prediction
return
# Predict power-law expansion coefficients and precision matrix
if self.X == 'B': # Blue
self.plpars = self.predict_alpha_coeffs(seeing, self.X)
self.plicov = N.array( # Precision matrix = 1/Cov
[[43.33738708, -66.87684631, -0.23146413],
[-66.87684631, 242.87202454, 4.43127346],
[-0.231464, 4.43127346, 12.65395737]])
else: # Red
self.plpars = self.predict_alpha_coeffs(seeing, self.X)
self.plicov = N.array( # Precision matrix = 1/Cov
[[476.81713867, 19.62824821, 23.05086708],
[19.62825203, 612.26849365, 11.54866409],
[23.05086899, 11.54866314, 11.4956665]])
if verbose:
print("Power-law expansion coefficient predictions:")
print(" Seeing prior: %.2f\"" % seeing)
print(" Parameters: p0=%+.3f p1=%+.3f p2=%+.3f" % \
tuple(self.plpars))
print(" ~dParams: dp0=% .3f dp1=% .3f dp2=% .3f" % \
tuple(self.plicov.diagonal()**(-0.5)))
def _predict_shape(self, inhdr, verbose=False):
"""
Predict shape parameters y2,xy and prediction accuracy dy2,dxy, for
use in hyper-term computation. 1st-order corrections and model
dispersions were obtained from faint standard star ad-hoc
analysis (`runaway.py`).
"""
self.y2 = self.predict_y2_param(inhdr)
self.xy = 0. # Pure dispersion
# Final model dispersion
if self.X == 'B': # Blue
self.dy2 = 0.221
self.dxy = 0.041
else: # Red
self.dy2 = 0.269
self.dxy = 0.050
if verbose:
print("Shape parameter predictions:")
print(" Airmass: %+.2f" % inhdr['AIRMASS'])
print(" Parameters: y²=% .3f, xy=% .3f" % (self.y2, self.xy))
print(" dParam: Δy²=% .3f, Δxy=% .3f" % (self.dy2, self.dxy))
def _predict_pos(self, position, verbose=False):
"""
Predict position (x,y) and prediction accuracy (dx,dy) at
reference wavelength, for use in hyper-term computation.
"""
self.position = position # None or (x,y)
self.dposition = (
self.positionAccuracy, self.positionAccuracy) # [spx]
if verbose and self.position is not None:
print("Position predictions:")
print(" Parameters: x=% .3f, y=% .3f" % self.position)
print(" dParam: Δx=% .3f, Δy=% .3f" % self.dposition)
def comp(self, param):
"""
Input parameters, same as `ExposurePSF.comp`, notably:
- param[0,1]: ADR power (delta) and parallactic angle (theta[rad])
- param[2,3]: X,Y position at reference wavelength
- param[4]: xy parameter
- param[5:6+n]: Ellipticity param. expansion (n+1: # of coeffs)
- param[6+n:7+n+m]: Moffat scale alpha expansion (m+1: # of coeffs)
"""
# Term from ADR parameters
hadr = ( ((param[0] - self.delta) / self.ddelta)**2 +
((param[1] - self.theta) / self.dtheta)**2 )
# Term from shape parameters
hsha = ( ((param[4] - self.xy) / self.dxy)**2 +
((param[5] - self.y2) / self.dy2)**2 )
if self.plpars is not None:
# Term from PL parameters
dalpha = param[self.alphaSlice] - self.plpars
# Faster than dalpha.dot(self.plicov).dot(dalpha)
hpl = N.dot(N.dot(dalpha, self.plicov), dalpha)
else:
hpl = 0.
# Term from position
if self.position is not None:
hpos = ( ((param[2] - self.position[0]) / self.dposition[0])**2 +
((param[3] - self.position[1]) / self.dposition[1])**2 )
else:
hpos = 0.
return self.scale * (hadr + hsha + hpl + hpos) # Scalar ()
def deriv(self, param):
hjac = N.zeros(len(param)) # Half jacobian
# ADR parameter jacobian
hjac[0] = (param[0] - self.delta) / self.ddelta**2
hjac[1] = (param[1] - self.theta) / self.dtheta**2
# Shape parameter jacobian
hjac[4] = (param[4] - self.xy) / self.dxy**2
hjac[5] = (param[5] - self.y2) / self.dy2**2
if self.plpars is not None:
# PL-expansion parameter jacobian
hjac[self.alphaSlice] = N.dot(
self.plicov, param[self.alphaSlice] - self.plpars)
# Position jacobian
if self.position is not None:
hjac[2] = (param[2] - self.position[0]) / self.dposition[0]**2
hjac[3] = (param[3] - self.position[1]) / self.dposition[1]**2
return self.scale * 2 * hjac # (npar,)
def __str__(self):
s = "PSF3D_PL hyper-term: hyper-scale=%.2f" % self.scale
s += "\n ADR: delta=% 7.2f +/- %.2f" % (self.delta, self.ddelta)
s += "\n theta=%+7.2f +/- %.2f deg" % \
(self.theta * TA.RAD2DEG, self.dtheta * TA.RAD2DEG)
s += "\n Shape: xy=% 5.3f +/- %.3f" % (self.xy, self.dxy)
s += "\n y2=% 5.3f +/- %.3f" % (self.y2, self.dy2)
if self.plpars is not None:
            dplpars = self.plicov.diagonal()**(-0.5)  # Approx. std. dev. (neglecting correlations)
s += "\n PL: p0=%+.3f +/- %.3f" % (self.plpars[0], dplpars[0])
s += "\n p1=%+.3f +/- %.3f" % (self.plpars[1], dplpars[1])
s += "\n p2=%+.3f +/- %.3f" % (self.plpars[2], dplpars[2])
if self.position is not None:
s += "\n Position: x=% 5.3f +/- %.3f" % \
(self.position[0], self.dposition[0])
s += "\n y=% 5.3f +/- %.3f" % \
(self.position[1], self.dposition[1])
return s
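# --- Illustrative usage sketch (editor's addition, not part of the original
# pipeline). The hyper-term acts as an additive Gaussian prior on the PSF
# fit: chi2_tot(p) = chi2_data(p) + hyper.comp(p), with the gradient picking
# up hyper.deriv(p). Assuming `psf_ctes` and `inhdr` follow the ExposurePSF
# conventions used above:
#
#   hyper = Hyper_PSF3D_PL(psf_ctes, inhdr, seeing=1.0, scale=1.0)
#   chi2 = chi2 + hyper.comp(param)     # scalar penalty
#   jac = jac + hyper.deriv(param)      # (npar,) gradient contribution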
class Hyper_PSF2D_PL(Hyper_PSF3D_PL):
"""
Hyper-term to be added to 2D-PSF fit: priors on alpha (seeing), xy
and y2 shape terms.
"""
dalpha = 0.15 # Relaxed achromatic accuracy
def __init__(self, lbda, seeing, airmass, channel,
position=None, scale=1., verbose=False):
        # Mimic PSF constants and input header
psf_ctes = [None, None, 2, 0]
inhdr = {'CHANNEL': channel,
'AIRMASS': airmass,
'PARANG': 0.}
Hyper_PSF3D_PL.__init__(self, psf_ctes, inhdr, seeing,
position=position, scale=scale, verbose=False)
self.alpha = self.predict_alpha(lbda)
def predict_alpha(self, lbda):
"""Predict alpha at wavelength `lbda`."""
if self.plpars is None:
return None
alpha = powerLawEval(self.plpars, lbda / LbdaRef)
# Adjust prediction with linear regression in wavelength
dalpha = -2.35e-05 * lbda + 0.156 # Up to 0.1 correction
return alpha - dalpha # Total prediction
def comp(self, param):
"""
Input parameters, notably:
- param[0,1]: ADR delta and theta (kept fixed to 0)
- param[2,3]: X,Y position at reference wavelength
- param[4]: xy parameter
- param[5]: Ellipticity parameter
- param[6]: Moffat scale alpha
        - param[7]: Point-source intensity
"""
# Terms from xy- and y2-parameters
h = (((param[4] - self.xy) / self.dxy)**2 +
((param[5] - self.y2) / self.dy2)**2)
if self.plpars is not None:
# Term from alpha
h += ((param[6] - self.alpha) / self.dalpha)**2
if self.position is not None:
# Term from point-source position
h += ( ((param[2] - self.position[0]) / self.dposition[0])**2 +
((param[3] - self.position[1]) / self.dposition[1])**2 )
return self.scale * h
def deriv(self, param):
hjac = N.zeros(len(param)) # Half jacobian
hjac[4] = (param[4] - self.xy) / self.dxy**2
hjac[5] = (param[5] - self.y2) / self.dy2**2
if self.plpars is not None:
hjac[6] = (param[6] - self.alpha) / self.dalpha**2
if self.position is not None:
hjac[2] = (param[2] - self.position[0]) / self.dposition[0]**2
hjac[3] = (param[3] - self.position[1]) / self.dposition[1]**2
return self.scale * 2 * hjac # (npar,)
def __str__(self):
s = "PSF2D_PL hyper-term: hyper-scale=%.2f" % self.scale
s += "\n Pred. xy: %+.3f +/- %.3f" % (self.xy, self.dxy)
s += "\n Pred. y2: %+.3f +/- %.3f" % (self.y2, self.dy2)
if self.plpars is not None:
s += "\n Pred. alpha: %.2f +/- %.2f (%.2f\" at %.0f A)" % \
(self.alpha, self.dalpha,
Long_ExposurePSF.seeing_powerlaw(LbdaRef, self.plpars),
LbdaRef)
if self.position is not None:
s += "\n Pred. x: %+.2f +/- %.2f" % (self.position[0],
self.dposition[0])
s += "\n Pred. y: %+.2f +/- %.2f" % (self.position[1],
self.dposition[1])
return s
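# Illustrative sketch (editor's addition; values hypothetical): the 2D
# variant places the same priors on a single-slice fit at wavelength `lbda`:
#
#   hyper2d = Hyper_PSF2D_PL(lbda=5000., seeing=1.1, airmass=1.2,
#                            channel='B', scale=1.0)
#   penalty = hyper2d.comp(param)       # added to the slice-fit chi2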
class StepJ:
name = 'stepJ'
parnames = ['mean', 'diff']
npar_cor = 0 # Cube-level parameters
npar_ind = 2 # Slice-level parameters
def __init__(self, ijlim=(7, 7), cube=None):
self.ilim, self.jlim = ijlim
self.nlens = cube.nlens # Spatial dimension (# of spx)
self.nslice = cube.nslice # Wavelength dimension (# of wavelengths)
self.npar = self.npar_cor + self.npar_ind * self.nslice # Total
# Spaxel coordinates (nlens,)
self.i = cube.i
self.j = cube.j
self.lower_part = self.in_lower_part(self.i, self.j)
def in_lower_part(self, i, j):
"""Step definition."""
return (j < self.jlim) | ((j == self.jlim) & (i > self.ilim))
def comp(self, param):
"""
Step background.
- param[:nslice]: mean intensities '(lower + upper)/2'
- param[nslice:]: intensity differences wrt mean '(lower - upper)/2'
"""
self.param = N.asarray(param)
mean = self.param[:self.nslice]
diff = self.param[self.nslice:]
bkgnd = N.empty((self.nslice, self.nlens), dtype='d')
bkgnd[:, self.lower_part] = (mean + diff)[:, N.newaxis]
bkgnd[:, ~self.lower_part] = (mean - diff)[:, N.newaxis]
return bkgnd # (nslice, nlens)
def deriv(self, param):
self.param = N.asarray(param)
jac = N.ones((2, self.nslice, self.nlens), dtype='d')
jac[1, :, ~self.lower_part] = -1 # dStep/ddiff
        return jac  # (npar_ind=2, nslice, nlens)
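# Illustrative sketch (editor's addition), assuming `cube` is a datacube
# object exposing nlens, nslice and per-spaxel coordinates i, j:
#
#   step = StepJ(ijlim=(7, 7), cube=cube)
#   param = N.concatenate([means, diffs])   # length 2 * cube.nslice
#   bkg = step.comp(param)                  # (nslice, nlens) step background
#   jac = step.deriv(param)                 # (2, nslice, nlens)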
|
snfactory/extract-star
|
extract_star/libExtractStar.py
|
Python
|
mit
| 75,122
|
[
"Gaussian"
] |
68ee06ddc75c1f2a6b820cc2d44f7350aace63fedd2d8657635d5b1bfcc99d31
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2020 Jan Sparreboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id$
"""Reports/Text Reports/Todo Report"""
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.plug import docgen
import gramps.gen.datehandler
from gramps.gen.filters import GenericFilterFactory
from gramps.gen.filters import rules
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import Report
from gramps.gen.errors import ReportError
import gramps.gen.plug.report.utils as ReportUtils
from gramps.gen.plug.menu import EnumeratedListOption, BooleanOption
#------------------------------------------------------------------------
# Internationalisation
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
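# Grouped note entries are (referenced_object_handle, note_handle) tuples;
# the two indices below select the respective positions (see
# _write_grouped_notes and _write_notes).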
_REF_HANDLE_POS = 0
_NOTE_HANDLE_POS = 1
_PLACEHOLDER = "_" * 12
#------------------------------------------------------------------------
#
# TodoReport
#
#------------------------------------------------------------------------
class TodoReport(Report):
"""Produce a report listing all notes with a given marker.
Based on the Marker report, but starting with the notes flagged with a
particular marker (chosen at run-time). The records that the note
references are included in the report so you do not have to duplicate
that information in the note.
"""
def __init__(self, database, options, user):
"""
Create the Report object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gramps.gen.user.User() instance
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.tag = menu.get_option_by_name('tag').get_value()
if not self.tag:
raise ReportError(_('ToDo Report'),
_('You must first create a tag before running this report.'))
self.can_group = menu.get_option_by_name('can_group').get_value()
def write_report(self):
"""
Generate the report document
"""
self.doc.start_paragraph(_("TR-Title"))
title = _("Report on Notes Tagged '%s'") % self.tag
mark = docgen.IndexMark(title, docgen.INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
        # get all the notes in the database tagged with the chosen tag
nlist = self.database.get_note_handles()
FilterClass = GenericFilterFactory('Note')
my_filter = FilterClass()
my_filter.add_rule(rules.note.HasTag([self.tag]))
note_list = my_filter.apply(self.database, nlist)
if self.can_group:
self._write_grouped_notes(note_list)
else:
self._write_sorted_notes(note_list)
def _write_grouped_notes(self, note_list):
"""
        Group the notes by the class name of the first referenced object and
        write each group as its own table
"""
# now group the notes by type
note_groups = dict()
for note_handle in note_list:
refs = self.database.find_backlink_handles(note_handle)
try:
# grouping by the first reference
(class_name, r_handle) = list(refs)[0]
if class_name in note_groups:
note_groups[class_name].append((r_handle, note_handle))
else:
note_groups[class_name] = [(r_handle, note_handle)]
except IndexError:
# no back-links were found
pass
for k in sorted(note_groups.keys(), reverse=True):
# now sort the handles based on the class name, if we don't find
# a match, the data will not be sorted.
if k == "Family":
note_list = sorted(note_groups[k], key=self.getFamilyKey)
elif k == "Person":
note_list = sorted(note_groups[k], key=self.getPersonKey)
elif k == "Event":
note_list = sorted(note_groups[k], key=self.getEventKey)
elif k == "Place":
note_list = sorted(note_groups[k], key=self.getPlaceKey)
else:
note_list = note_groups[k]
self._write_notes(note_list, k)
def _write_sorted_notes(self, note_list):
all_notes = []
for note_handle in note_list:
refs = self.database.find_backlink_handles(note_handle)
# grouping by the first reference
try:
(class_name, r_handle) = list(refs)[0]
if class_name == "Family":
key = self.getFamilyKey((r_handle,))
elif class_name == "Person":
key = self.getPersonKey((r_handle,))
elif class_name == "Event":
key = self.getEventKey((r_handle,))
elif class_name == "Place":
key = self.getPlaceKey((r_handle,))
else:
note = self.database.get_note_from_handle(note_handle)
key = note.get_gramps_id()
all_notes.append((key, note_handle))
except IndexError:
# no back-link references were found, so we'll use the note ID
# as the key
note = self.database.get_note_from_handle(note_handle)
                key = note.get_gramps_id()
                all_notes.append((key, note_handle))
self._write_notes(sorted(all_notes))
def _write_references(self, note_handle):
"""
Find the primary references attached the note and add them to the report
"""
refs = self.database.find_backlink_handles(note_handle)
for (class_name, r_handle) in refs:
if class_name == "Family":
self._write_family(r_handle)
elif class_name == "Person":
self._write_person(r_handle)
elif class_name == "Event":
self._write_event(r_handle)
elif class_name == "Place":
self._write_place(r_handle)
def _write_notes(self, note_list, title=None):
"""
Generate a table for the list of notes
"""
if not note_list:
return
if title is not None:
self.doc.start_paragraph(_("TR-Heading"))
header = _(title)
mark = docgen.IndexMark(header, docgen.INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table(_('NoteTable'),_('TR-Table'))
self.doc.start_row()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal-Bold'))
self.doc.write_text(_("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'), 3)
self.doc.start_paragraph(_('TR-Normal-Bold'))
self.doc.write_text(_("Text"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for handles in note_list:
note_handle = handles[_NOTE_HANDLE_POS]
note = self.database.get_note_from_handle(note_handle)
self.doc.start_row()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(note.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'), 3)
self.doc.write_styled_note(note.get_styledtext(),
note.get_format(), _('TR-Note'))
self.doc.end_cell()
self.doc.end_row()
self._write_references(note_handle)
self.doc.start_row()
self.doc.start_cell(_('TR-BorderCell'), 4)
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text('')
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def _write_person(self, person_handle):
"""
Generate a table row for a person record
"""
person = self.database.get_person_from_handle(person_handle)
self.doc.start_row()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(person.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
name = name_displayer.display(person)
mark = ReportUtils.get_person_mark(self.database, person)
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(name, mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
birth_ref = person.get_birth_ref()
if birth_ref:
event = self.database.get_event_from_handle(birth_ref.ref)
self.doc.write_text(_("b. ") + gramps.gen.datehandler.get_date( event ))
else:
self.doc.write_text(_("b. ") + "_" * 12)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
death_ref = person.get_death_ref()
if death_ref:
            event = self.database.get_event_from_handle(death_ref.ref)
            self.doc.write_text(_("d. ") + gramps.gen.datehandler.get_date(event))
        else:
            self.doc.write_text(_("d. ") + _PLACEHOLDER)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
def _write_family(self, family_handle):
"""
Generate a table row for this family record
"""
family = self.database.get_family_from_handle(family_handle)
self.doc.start_row()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(family.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
father_handle = family.get_father_handle()
if father_handle:
father = self.database.get_person_from_handle(father_handle)
mark = ReportUtils.get_person_mark(self.database, father)
self.doc.write_text(name_displayer.display(father), mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.database.get_person_from_handle(mother_handle)
mark = ReportUtils.get_person_mark(self.database, mother)
self.doc.write_text(name_displayer.display(mother), mark)
self.doc.end_paragraph()
self.doc.end_cell()
# see if we can find a relationship event to include
relationship_date = _PLACEHOLDER
for evt_ref in family.get_event_ref_list():
evt_handle = evt_ref.get_reference_handle()
evt = self.database.get_event_from_handle(evt_handle)
# FIXME: where are the event types defined in Gramps,
# and are these the only important ones?
#print repr(evt.get_type().string)
if evt.get_type().string in ["Marriage", "Civil Union"]:
relationship_date = gramps.gen.datehandler.get_date(evt)
        rel_msg = _("%(relationship_type)s on %(relationship_date)s") % {
            'relationship_type': family.get_relationship(),
            'relationship_date': relationship_date}
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(rel_msg)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
def _write_event(self, event_handle):
"""
Generate a table row for this event record
"""
event = self.database.get_event_from_handle(event_handle)
self.doc.start_row()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(event.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
date = gramps.gen.datehandler.get_date(event)
if date:
self.doc.write_text(date)
else:
self.doc.write_text(_("date: ") + _PLACEHOLDER)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
place_handle = event.get_place_handle()
place = ReportUtils.place_name(self.database, place_handle)
if place:
self.doc.write_text(place)
else:
self.doc.write_text(_("place: ") + _PLACEHOLDER)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
descr = event.get_description()
if descr:
self.doc.write_text( descr )
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
def _write_place(self, place_handle):
"""
Generate a table row with the place record information.
"""
place = self.database.get_place_from_handle(place_handle)
self.doc.start_row()
self.doc.start_cell(_('TR-TableCell'))
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(place.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell(_('TR-TableCell'), 3)
self.doc.start_paragraph(_('TR-Normal'))
self.doc.write_text(place_displayer.display(self.database, place))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
#
# Sort Functions
#
def getPersonKey(self, group_entry):
"""
Return a string of the persons name (last, first) as the key
"""
per_handle = group_entry[_REF_HANDLE_POS]
person = self.database.get_person_from_handle(per_handle)
sort_key = person.get_primary_name().get_name()
return sort_key.upper()
def getFamilyKey(self, group_entry):
"""
Return a string with the father's or mother's name (in that order) as the key
"""
sort_key = ""
person = None
family_handle = group_entry[_REF_HANDLE_POS]
family = self.database.get_family_from_handle(family_handle)
if family:
father_handle = family.get_father_handle()
if father_handle:
person = self.database.get_person_from_handle(father_handle)
else:
mother_handle = family.get_mother_handle()
if mother_handle:
person = self.database.get_person_from_handle(mother_handle)
if person is not None:
sort_key = person.get_primary_name().get_name()
return sort_key.upper()
def getEventKey(self, group_entry):
"""Return the event date as a string to use for sorting the events.
I'm returning the date with 'zz' prefixed so it will sort at the bottom
when not using grouping.
"""
evt_handle = group_entry[_REF_HANDLE_POS]
event = self.database.get_event_from_handle(evt_handle)
date = event.get_date_object()
return "zz" + str(date)
def getPlaceKey(self, group_entry):
"""
Return the place description to use when sorting the place records.
"""
p_handle = group_entry[_REF_HANDLE_POS]
place = self.database.get_place_from_handle(p_handle)
title = place_displayer.display(self.database, place)
return title.upper()
#------------------------------------------------------------------------
# TodoOptions
#------------------------------------------------------------------------
class TodoOptions(MenuReportOptions):
"""Set up the options dialog for this report"""
def __init__(self, name, dbase):
"""Create the object and initialize the parent class"""
self.__db = dbase
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the marker report.
"""
category_name = _("Report Options")
all_tags = []
for handle in self.__db.get_tag_handles():
tag = self.__db.get_tag_from_handle(handle)
all_tags.append(tag.get_name())
if len(all_tags) > 0:
tag_option = EnumeratedListOption(_('Tag'), all_tags[0])
for tag_name in all_tags:
tag_option.add_item(tag_name, tag_name)
else:
tag_option = EnumeratedListOption(_('Tag'), '')
tag_option.add_item('', '')
tag_option.set_help( _("The tag to use for the report"))
menu.add_option(category_name, "tag", tag_option)
can_group = BooleanOption(_("Group by reference type"), False)
can_group.set_help( _("Group notes by Family, Person, Place, etc."))
menu.add_option(category_name, "can_group", can_group)
def make_default_style(self, default_style):
"""Make the default output style for the Todo Report."""
# Paragraph Styles
font = docgen.FontStyle()
font.set_size(16)
font.set_type_face(docgen.FONT_SANS_SERIF)
font.set_bold(1)
para = docgen.ParagraphStyle()
para.set_header_level(1)
para.set_bottom_border(1)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_font(font)
para.set_alignment(docgen.PARA_ALIGN_CENTER)
para.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style(_("TR-Title"), para)
font = docgen.FontStyle()
font.set(face=docgen.FONT_SANS_SERIF, size=14, italic=1)
para = docgen.ParagraphStyle()
para.set_font(font)
para.set_header_level(2)
para.set_top_margin(0.25)
para.set_bottom_margin(0.25)
para.set_description(_('The style used for the section headers.'))
default_style.add_paragraph_style(_("TR-Heading"), para)
font = docgen.FontStyle()
font.set_size(12)
para = docgen.ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_font(font)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style(_("TR-Normal"), para)
font = docgen.FontStyle()
font.set_size(12)
font.set_bold(True)
para = docgen.ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_font(font)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_description(_('The basic style used for table headings.'))
default_style.add_paragraph_style(_("TR-Normal-Bold"), para)
para = docgen.ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_description(_('The basic style used for the note display.'))
default_style.add_paragraph_style(_("TR-Note"), para)
#Table Styles
cell = docgen.TableCellStyle()
cell.set_description(_('The basic style used for the table cell display.'))
default_style.add_cell_style(_('TR-TableCell'), cell)
cell = docgen.TableCellStyle()
cell.set_bottom_border(1)
cell.set_description(_('The basic style used for the table border cell display.'))
default_style.add_cell_style(_('TR-BorderCell'), cell)
table = docgen.TableStyle()
table.set_width(100)
table.set_columns(4)
table.set_column_width(0, 10)
table.set_column_width(1, 30)
table.set_column_width(2, 30)
table.set_column_width(3, 30)
table.set_description(_('The basic style used for the table display.'))
default_style.add_table_style(_('TR-Table'), table)
|
gramps-project/addons-source
|
ToDoReport/TodoReport.py
|
Python
|
gpl-2.0
| 22,009
|
[
"Brian"
] |
1885bb73e85b2a595dc50abcab4e57c151c9bc487c644e02e98bfc1971242bfd
|
#!/usr/bin/env python
# encoding: utf-8
# ======================================================================
# Package: pyclaw.io
# File: __init__.py
# Created: Feb 10, 2008
# Author: Kyle Mandli
# ======================================================================
"""Output package for Pyclaw"""
import logging
from ascii import read_ascii,write_ascii
from netcdf import read_netcdf, write_netcdf
__all__ = ['read_ascii','write_ascii','read_netcdf','write_netcdf']
# Check for HDF 5 support
try:
import h5py
from hdf5 import read_hdf5,write_hdf5
__all__ += ['read_hdf5','write_hdf5']
except ImportError:
logging.debug("No hdf5 support found.")
# Check for netcdf 4 support
try:
import netCDF4
from netcdf import read_netcdf, write_netcdf
__all__ += ['read_netcdf','write_netcdf']
except(ImportError):
logging.debug("No netcdf4 support found.")
# Check for netcdf 3 support
try:
import Scientific.IO.NetCDF
from netcdf import read_netcdf, write_netcdf
__all__ += ['read_netcdf','write_netcdf']
except(ImportError):
logging.debug("No netcdf3 support found.")
|
clawpack/clawpack-4.x
|
python/pyclaw/io/__init__.py
|
Python
|
bsd-3-clause
| 1,375
|
[
"NetCDF"
] |
700c56a24afcde795b6cfcd08ccbadb7e2f16c14c1e9d8cdb3cec604639eb267
|
"""
# Copyright Nick Cheng, Brian Harrington, Danny Heap, Venkat Korapaty,
# 2013, 2014, 2015
# Distributed under the terms of the GNU General Public License.
#
# This file is part of Assignment 2, CSCA48, Winter 2015
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
"""
# Do not change this import statement, or add any of your own!
from regextree import RegexTree, StarTree, DotTree, BarTree, Leaf
# Do not change anything above this comment except for the copyright
# statement
# Student code below this comment.
def is_regex(s):
'''(string) -> bool
Takes a string s, and returns True if it is a valid regular expression
else, returns False
>>> is_regex('0')
True
>>> is_regex('2*')
True
>>> is_regex('((1.(0|2)*).0)')
True
>>> is_regex('wot')
False
>>> is_regex('(3.0)')
False
'''
# base case
if s == '':
return False
# checks if the regex is either '1', '2', '0' or 'e'
if len(s) == 1:
if s in {'0', '1', '2', 'e'}:
return True
return False
    # if the regex has an asterisk at the end, recurse without the asterisk
    elif s[-1] == '*':
        return is_regex(s[:-1])
    # if the regex is wrapped in brackets
    elif s[0] == '(' and s[len(s) - 1] == ')':
        # finds the location of the root operator
        dot_bar = dot_bar_loca(s)
        # if no root operator is found, it is an invalid regex
        if dot_bar is False:
            return False
        # recurse on both halves and return True only if both are valid
        return is_regex(s[1:dot_bar]) and is_regex(s[dot_bar + 1:len(s) - 1])
else:
return False
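# Worked trace (editor's addition): is_regex('(1.0*)')
#   -> bracketed case; dot_bar_loca('(1.0*)') finds the root '.' at index 2
#   -> is_regex('1') and is_regex('0*')
#   -> True and is_regex('0')  ->  True.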
def parenthesis(s, count=0):
'''(str, int) -> int or bool
    Finds the index of the closing parenthesis that matches the opening
    parenthesis at index 0.
REQ: s[0] == '('
REQ: amount of opening brackets == amount of closing brackets
>>> parenthesis('(1|2)')
4
>>> parenthesis('(((1.(0|2)*).0))')
15
>>> parenthesis('((()))')
5
'''
if s == '':
return False
# if it starts with an opening bracket, recurses and adds to the count
if s[0] == '(':
return 1 + parenthesis(s[1:], count + 1)
    # if it is not a closing bracket, don't add to the count and recurse
if s[0] != ')':
return 1 + parenthesis(s[1:], count)
# if it is a closing bracket
elif s[0] == ')':
# it has reached the end and returns the index
if count - 1 == 0:
return 0
# it has reached a closing bracket before the desired one
return 1 + parenthesis(s[1:], count - 1)
def all_regex_permutations(s):
'''(str or list) -> list
Takes in a regular expression and returns all permutations of it
that are valid regular expressions
(Brute-force method)
>>> all_regex_permutations('3')
set()
>>> all_regex_permutations('2*')
{'2*'}
>>> all_regex_permutations('(1.e)')
{'(e.1)', '(1.e)'}
>>> all_regex_permutations('((e*.1)|1)')
{'((e.1)|1)*', '((1|1).e)*', '((1|e).1*)', '((1*|e).1)', '(1.(e|1*))',\
'(e.(1|1))*', '(1.(1*|e))', '((e.1*)|1)', '(1.(1|e*))', '(1.(e|1))*',\
'(e|(1.1))*', '(1.(e|1)*)', '((1*|1).e)', '((1|e)*.1)', '(1*|(1.e))',\
'(1.(e*|1))', '((e|1)*.1)', '(1.(1|e))*', '(e.(1|1*))', '(e.(1|1)*)',\
'((e|1*).1)', '(1*.(1|e))', '((1.e)|1*)', '((1.e)*|1)', '(e*.(1|1))',\
'((1.1)|e)*', '((1.1)*|e)', '(1|(e.1*))', '((e|1).1*)', '(e|(1.1*))',\
'(e|(1.1)*)', '((1.e)|1)*', '((1|e).1)*', '((1.1)|e*)', '((e|1).1)*',\
'(1|(e*.1))', '(1|(1.e*))', '(1|(1.e))*', '(1|(e.1)*)', '(e|(1*.1))',\
'(1*.(e|1))', '((e.1)|1*)', '((e.1)*|1)', '((1|1).e*)', '((e*|1).1)',\
'((e*.1)|1)', '((1|1)*.e)', '((1*.e)|1)', '((1.e*)|1)', '((1.1*)|e)',\
'((1|1*).e)', '((1*.1)|e)', '(1.(1|e)*)', '(e*|(1.1))', '((1|e*).1)',\
'(1*|(e.1))', '(1|(1*.e))', '(e.(1*|1))', '(1|(1.e)*)', '(1|(e.1))*'}
'''
# finds all the possible permutations of the regex string
perm_list = perm(s)
    # initializes the set to be returned
    regex_perms = set()
    # for each element in the permutation list
    for element in perm_list:
        # checks whether it is a valid regex (the set discards duplicates)
if is_regex(element):
# adds it to the set
regex_perms.add(element)
return regex_perms
def perm(s):
'''(str) -> list
Takes in a string and returns all possible permutations of it in a list
>>> perm('abc')
['cba', 'bca', 'cab', 'acb', 'bac', 'abc']
>>> perm('uwot')
['towu', 'otwu', 'twou', 'wtou', 'owtu', 'wotu', 'touw', 'otuw', 'tuow',\
'utow', 'outw', 'uotw', 'twuo', 'wtuo', 'tuwo', 'utwo', 'wuto', 'uwto',\
'owut', 'wout', 'ouwt', 'uowt', 'wuot', 'uwot']
'''
if len(s) == 1:
return [s[0]]
# list to return
perms = []
# loop through entire string
for i in range(len(s)):
string = s[:i] + s[i + 1:]
        # finds the permutations of the shortened string with one character removed
result = perm(string)
# adds the letter at the end of each permutation of the regexs
for element in range(len(result)):
# adds the missing symbol from the string to the permutation
result[element] += s[i]
# adds the permutation to the list to be returned
perms.append(result[element])
return perms
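# Note (editor's addition): perm() materialises all n! orderings of the
# input, so all_regex_permutations() is factorial-time; a 10-character
# regex such as '((e*.1)|1)' already enumerates 10! = 3,628,800 strings.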
def regex_match(r, s):
'''(RegexTree, str) -> bool
Takes in a RegexTree rooted at r and a string and checks if the string
matches the regex, if it does, return True, else False
>>> tree = build_regex_tree('((0.(1*.2)*)|(e*.1))*')
>>> regex_match(tree, '01112121111112')
True
>>> tree = build_regex_tree('(1*.2)')
>>> regex_match(tree, '11111112')
True
>>> tree = build_regex_tree('((1.2**).(0**|2))')
>>> regex_match(tree, '12222200002')
False
'''
# base case
if isinstance(r, Leaf):
# if the leaf is an 'e'
if r.get_symbol() == 'e':
# returns true if the string is empty as e represents empty string
if s == '':
return True
            # since the string isn't empty, it cannot match 'e'
            return False
        # otherwise, an empty string cannot match a literal symbol
if s == '':
return False
# returns whether the string is equal to the symbol
return s == r.get_symbol()
# deals with dot trees
elif isinstance(r, DotTree):
        # splits the string at every index and sends the two halves to the
        # left and right children to check whether both match
for i in range(len(s) + 1):
temp = regex_match(r.get_left_child(), s[:i]) and regex_match(
r.get_right_child(), s[i:])
# if both ever reach True, it will return True
if temp:
return True
return False
# deals with bar trees
elif isinstance(r, BarTree):
# checks if the entire string is equal to the regex left of the bar
left = regex_match(r.get_left_child(), s)
# checks if the entire string is equal to the regex right of the bar
right = regex_match(r.get_right_child(), s)
# if the left gives True, returns True
if left:
return True
# if the right gives True, returns True
if right:
return True
# if neither are True, then returns False
return False
# deals with star trees
else:
# if the string is empty, returns True
if s == '':
return True
        # try matching the whole string as a single repetition first
        if regex_match(r.get_child(), s):
            return True
        # otherwise peel off a matching prefix and recurse on the remainder
        return star_helper(r, s)
def star_helper(r, s):
'''(RegexTree, str) -> bool
    Finds a prefix of the string that matches the star's child, then
    recurses on the remainder; returns False if no such split leads to
    a full match
>>> star_helper(StarTree(BarTree(Leaf('1'), Leaf('2'))), '12')
True
>>> star_helper(StarTree(BarTree(Leaf('1'), Leaf('2'))), '01')
False
'''
# finds the index where the shortened string is true
temp = star_index(r, s)
# if it gives false, then return false
if temp is False:
return False
# calls star case for everything past the shorted portion of the string
return regex_match(r, s[temp + 1:])
def star_index(r, s):
'''(RegexTree, str) -> int or bool
    Returns the index of the last character of the longest prefix of the
    string that matches the star's child; returns False if no prefix
    matches
>>> star_index(StarTree(BarTree(Leaf('1'), Leaf('2'))), '12')
0
>>> star_index(StarTree(DotTree(Leaf('1'), Leaf('2'))), '12222')
1
>>> star_index(StarTree(DotTree(Leaf('1'), Leaf('2'))), '012222')
False
'''
# starts from the index at the end of the string
i = len(s)
    # keep shortening the string from the right until a prefix matches
    while not regex_match(r.get_child(), s[:i]):
        # only one character is left and it already failed to match,
        # so no prefix can match
if len(s[:i]) == 1:
return False
# reduces index by 1
i -= 1
return i - 1
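# Worked trace (editor's addition): regex_match on '(1*.2)' vs '112'.
#   The DotTree splits '112' at every index; at i=2 the left child '1*'
#   matches '11' (star_index backtracks to the prefix '1', star_helper
#   recurses on the rest) and the right child '2' matches '2', so the
#   overall match succeeds.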
def build_regex_tree(regex):
'''(str) -> RegexTree
    Takes in a valid regex, builds the corresponding regex tree and returns
    its root.
REQ: regex must be a valid regex
>>> build_regex_tree('(2*|e)') == BarTree(StarTree(Leaf('2')), Leaf('e'))
True
>>> tree = build_regex_tree('(1.(e.2*)*)')
>>> tree == DotTree(Leaf('1'), StarTree(DotTree(Leaf('e'), StarTree(Leaf(\
'2')))))
True
'''
# base cases
if len(regex) == 1:
# when the regex is just '1', '2', '0', or 'e'
return Leaf(regex)
    elif regex[len(regex) - 1] == '*':
        # when the regex has an asterisk at the end, start with a StarTree
return StarTree(build_regex_tree(regex[:len(regex) - 1]))
else:
# finds location of the bar or dot in the regex
symbol = dot_bar_loca(regex)
# if the regex has a dot, creates a DotTree and recurses
if regex[symbol] == '.':
return DotTree(build_regex_tree(regex[1:symbol]
), build_regex_tree(regex[
symbol + 1:len(regex) - 1]))
# else it must be made with a BarTree and recurses
else:
return BarTree(build_regex_tree(regex[1:symbol]
), build_regex_tree(regex[
symbol + 1:len(regex) - 1]))
def dot_bar_loca(s, count=0, s_len=0):
'''(str) -> int or bool
    Returns the index of the top-level dot or bar operator inside the
    bracketed regex
>>> dot_bar_loca('(1|2)')
2
>>> dot_bar_loca('(e*.1)')
3
>>> dot_bar_loca('((2.e*)**|0)')
9
'''
if s == '':
return False
# keeps the original string's length
if s_len == 0:
s_len = len(s) - 1
# if this step is true, then the regex is not valid
if count == s_len:
return False
if len(s) == 1:
return False
# if there is a regex with brackets within the regex, recurses past that
if s[1] == '(':
temp = parenthesis(s[1:])
# should parenthesis return False, it means s is not a valid regex
if temp is False:
return False
count += temp + 1
return dot_bar_loca(s[count:], count, s_len)
# if the next index in the string is not '.' or '|', recurse past it
elif s[1] != '.' and s[1] != '|':
return dot_bar_loca(s[1:], count + 1, s_len)
    # the desired operator is found, so return its index
else:
return count + 1
if __name__ == '__main__':
print(perm("123"))
|
venkatkorapaty/Regex
|
regex_functions.py
|
Python
|
gpl-2.0
| 12,678
|
[
"Brian"
] |
776134c43a9cf782f8c219f9199fd8f2bfd996a804f7f0d6d668a03e4dd2a704
|
import pytest
from capybara.exceptions import ExpectationNotMet
@pytest.mark.requires("css")
class TestAssertStyle:
def test_is_true_if_the_elements_style_contains_the_given_properties(self, session):
session.visit("/with_html")
assert session.find("css", "#first").assert_style({"display": "block"}) is True
def test_raises_error_if_the_elements_style_does_not_contain_the_given_properties(self, session):
session.visit("/with_html")
with pytest.raises(ExpectationNotMet) as excinfo:
session.find("css", "#first").assert_style({"display": "inline"})
assert (
"Expected node to have styles {'display': 'inline'}. "
"Actual styles were {'display': 'block'}") in str(excinfo.value)
@pytest.mark.requires("css", "js")
def test_waits_for_style(self, session):
session.visit("/with_js")
el = session.find("css", "#change")
session.click_link("Change size")
assert el.assert_style({"font-size": "50px"}, wait=3)
|
elliterate/capybara.py
|
capybara/tests/session/test_assert_style.py
|
Python
|
mit
| 1,035
|
[
"VisIt"
] |
97af117c499d6fb7c416c85522e7a24b615bb66e7bab4dcba77c5e08fadbb640
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "jai.rideout@gmail.com"
from os.path import split
from shutil import copy
from bfillings.formatdb import build_blast_db_from_fasta_path
from skbio.parse.sequences import parse_fasta
from qiime.identify_chimeric_seqs import make_cidx_file
from qiime.parse import parse_tmp_to_final_filepath_map_file
from qiime.util import write_degapped_fasta_to_file
from qiime.parallel.util import ParallelWrapper
class ParallelChimericSequenceIdentifier(ParallelWrapper):
_script_name = 'identify_chimeric_seqs.py'
_input_splitter = ParallelWrapper._split_fasta
_job_prefix = 'CHIM'
_process_run_results_f = \
'qiime.parallel.identify_chimeric_seqs.basic_process_run_results_f'
def _precommand_initiation(self, input_fp, output_dir, working_dir,
params):
if params['chimera_detection_method'] == 'blast_fragments':
blast_db, db_files_to_remove = \
build_blast_db_from_fasta_path(params['reference_seqs_fp'],
output_dir=working_dir)
self.files_to_remove += db_files_to_remove
params['blast_db'] = blast_db
elif params['chimera_detection_method'] == 'ChimeraSlayer':
# copy the reference files to working dir
# ChimeraSlayer creates an index file of the ref and
# will crash without write permission in the ref seqs dir
aligned_reference_seqs_fp = params['aligned_reference_seqs_fp']
_, new_ref_filename = split(aligned_reference_seqs_fp)
copy(aligned_reference_seqs_fp, working_dir)
aligned_reference_seqs_fp = working_dir + "/" + new_ref_filename
self.files_to_remove.append(aligned_reference_seqs_fp)
params['aligned_reference_seqs_fp'] = aligned_reference_seqs_fp
# if given, also copy the unaligned ref db
reference_seqs_fp = params['reference_seqs_fp']
if reference_seqs_fp:
_, new_ref_filename = split(reference_seqs_fp)
copy(reference_seqs_fp, working_dir)
reference_seqs_fp = working_dir + "/" + new_ref_filename
else:
# otherwise create it
reference_seqs_fp = write_degapped_fasta_to_file(
parse_fasta(open(aligned_reference_seqs_fp)),
tmp_dir=working_dir)
# delete it afterwards
self.files_to_remove.append(reference_seqs_fp)
params['reference_seqs_fp'] = reference_seqs_fp
# build blast db of reference, otherwise ChimeraSlayer will do it
# and parallel jobs clash
_, db_files_to_remove = \
build_blast_db_from_fasta_path(reference_seqs_fp)
self.files_to_remove += db_files_to_remove
# make the index file globally
# Reason: ChimeraSlayer first checks to see if the index file is
# there. If not it tries to create it. This can lead to race
# condition if several parallel jobs try to create it at the same
# time.
make_cidx_file(aligned_reference_seqs_fp)
self.files_to_remove.append(aligned_reference_seqs_fp + ".cidx")
else:
raise ValueError("Unrecognized chimera detection method '%s'." %
params['chimera_detection_method'])
def _get_job_commands(self, fasta_fps, output_dir, params, job_prefix,
working_dir, command_prefix='/bin/bash; ',
command_suffix='; exit'):
"""Generate identify_chimeric_seqs.py commands which should be run."""
# Create basenames for each of the output files. These will be filled
# in to create the full list of files created by all of the runs.
out_filenames = [job_prefix + '.%d_chimeric.txt']
# Create lists to store the results.
commands = []
result_filepaths = []
# Iterate over the input files.
for i, fasta_fp in enumerate(fasta_fps):
# Each run ends with moving the output file from the tmp dir to
# the output_dir. Build the command to perform the move here.
rename_command, current_result_filepaths = \
self._get_rename_command([fn % i for fn in out_filenames],
working_dir, output_dir)
result_filepaths += current_result_filepaths
optional_options = ""
if params['chimera_detection_method'] == 'blast_fragments':
command = \
'%s %s -i %s -t %s -m blast_fragments -o %s -n %s -d %s -e %s -b %s %s %s' % \
(command_prefix,
self._script_name,
fasta_fp,
params['id_to_taxonomy_fp'],
working_dir + "/" + out_filenames[0] % i,
params['num_fragments'],
params['taxonomy_depth'],
params['max_e_value'],
params['blast_db'],
rename_command,
command_suffix)
elif params['chimera_detection_method'] == 'ChimeraSlayer':
optional_options = ""
if params['min_div_ratio']:
optional_options += " --min_div_ratio %s" % \
params['min_div_ratio']
if params['reference_seqs_fp']:
optional_options += " -r %s" % params['reference_seqs_fp']
command = \
'%s %s -i %s -a %s -m ChimeraSlayer -o %s %s %s %s' % \
(command_prefix,
self._script_name,
fasta_fp,
params['aligned_reference_seqs_fp'],
working_dir + "/" + out_filenames[0] % i,
optional_options,
rename_command,
command_suffix)
else:
raise NotImplementedError
commands.append(command)
return commands, result_filepaths
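    # For reference (editor's addition; paths hypothetical), a generated
    # blast_fragments job command has the shape:
    #   /bin/bash; identify_chimeric_seqs.py -i seqs.0.fasta -t id_to_tax.txt
    #       -m blast_fragments -o <working_dir>/CHIM.0_chimeric.txt -n 3
    #       -d 4 -e 1e-30 -b <blast_db> <rename command>; exit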
def _get_poller_command(self,
expected_files_filepath,
merge_map_filepath,
deletion_list_filepath,
command_prefix='/bin/bash; ',
command_suffix='; exit'):
"""Generate command to initiate a poller to monitior/process completed runs
"""
result = '%s poller.py -f %s -p %s -m %s -d %s -t %d %s' % \
(command_prefix,
expected_files_filepath,
self._process_run_results_f,
merge_map_filepath,
deletion_list_filepath,
self._seconds_to_sleep,
command_suffix)
return result, []
def _write_merge_map_file(self, input_file_basename, job_result_filepaths,
params, output_dir, merge_map_filepath,
failures=False):
f = open(merge_map_filepath, 'w')
out_filepaths = [params['output_fp']]
chims_fps = []
        logs_fps = []  # log filepaths are collected but currently unused
        for fp in job_result_filepaths:
            if fp.endswith('_chimeric.txt'):
                chims_fps.append(fp)
            else:
                logs_fps.append(fp)
for in_files, out_file in zip([chims_fps], out_filepaths):
f.write('\t'.join(in_files + [out_file]))
f.write('\n')
f.close()
def basic_process_run_results_f(f):
""" Copy each list of infiles to each outfile and delete infiles
f: file containing one set of mapping instructions per line
example f:
f1.txt f2.txt f3.txt f_combined.txt
f1.log f2.log f3.log f_combined.log
If f contained the two lines above, this function would
concatenate f1.txt, f2.txt, and f3.txt into f_combined.txt
and f1.log, f2.log, and f3.log into f_combined.log
"""
infiles_lists, out_filepaths = parse_tmp_to_final_filepath_map_file(f)
for infiles_list, out_filepath in zip(infiles_lists, out_filepaths):
try:
of = open(out_filepath, 'w')
except IOError:
raise IOError("Poller can't open final output file: %s" % out_filepath +
"\nLeaving individual jobs output.\n Do you have write access?")
for fp in infiles_list:
for line in open(fp):
of.write('%s\n' % line.strip('\n'))
of.close()
# It is a good idea to have your clean_up_callback return True.
# That way, if you get mixed up and pass it as check_run_complete_callback,
# you'll get an error right away rather than going into an infinite loop
return True
|
wasade/qiime
|
qiime/parallel/identify_chimeric_seqs.py
|
Python
|
gpl-2.0
| 9,174
|
[
"BLAST"
] |
20febf13053b39960b465b7329b633cb9bcac5bec1162d6b634ba10d95de801d
|
"""
Functional tests
~~~~~~~~~~~~~~~~
A functional test suite for running through the key actions in the admin UI.
Note that the best way to run this is by running ``make functional-test`` from
the project's root directory. If run directly (e.g., via ``nosetests
example``), the example app should already be running in another console.
.. note:: Firefox Required
By default the functional tests run in Firefox, which means it needs
to be installed before attempting to run.
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
from nose.tools import assert_true, assert_false
from splinter import Browser
from switchboard import configure
from switchboard.models import (
DISABLED,
SELECTIVE,
GLOBAL,
Switch,
)
configure()
url = 'http://localhost:8080/'
admin_url = url + '_switchboard/'
def assert_switch_active(browser, url=url):
browser.visit(url)
assert_true(browser.is_text_present('is active'),
'Switch is not active')
def assert_switch_inactive(browser, url=url):
browser.visit(url)
assert_true(browser.is_text_present('is NOT active'),
'Switch is not inactive')
def drop_datastore():
Switch.drop()
class TestAdmin(object):
@classmethod
def setup_class(cls):
cls.b = Browser()
# Ensure we're working with a clean slate.
drop_datastore()
@classmethod
def teardown_class(cls):
cls.b.quit()
def setup(self):
# Make sure the example switch is activated by at least
# one visit.
self.b.visit(url)
def teardown(self):
drop_datastore()
def test_root(self):
assert_switch_inactive(self.b)
def test_admin_index(self):
self.b.visit(admin_url)
assert_true(len(self.b.find_by_id('id_example')))
def test_change_status(self):
# Set the switch to global status and verify it's active.
self.b.visit(admin_url)
self.b.select('status_example', GLOBAL)
alert = self.b.get_alert()
alert.accept()
css = '#id_example[data-switch-status="{status}"]'
active_selector = css.format(status=GLOBAL)
is_status_updated = self.b.is_element_present_by_css(active_selector,
wait_time=10)
assert_true(is_status_updated, 'Switch status not updated')
self.b.visit(url)
assert_switch_active(self.b)
# Set the switch back to inactive and verify.
self.b.visit(admin_url)
self.b.select('status_example', DISABLED)
inactive_selector = css.format(status=DISABLED)
is_status_updated = self.b.is_element_present_by_css(inactive_selector,
wait_time=10)
assert_true(is_status_updated, 'Switch status not updated')
self.b.visit(url)
assert_switch_inactive(self.b)
def test_add_and_delete_condition(self):
self.b.visit(admin_url)
# Click the button.
switch = self.b.find_by_id('id_example').first
btn = switch.find_by_css('a[href="#add-condition"]')
btn.click()
# Setup a condition.
form = switch.find_by_css('.conditions-form').first
assert_true(form.visible, 'Add conditions form is not visible.')
condition_id = 'switchboard.builtins.QueryStringConditionSet,regex'
# Can't use select() here because it doesn't support <optgroup>.
css = 'select[name="{}"] option[value="{}"]'.format(
'condition',
condition_id,
)
form.find_by_css(css)._element.click()
css = '.fields[data-path="{}"]'.format(condition_id.replace(',', '.'))
field = form.find_by_css(css)
assert_true(field.visible, 'Condition field is not visible')
data_value = 'test'
field.find_by_name('regex').fill(data_value)
field.find_by_css('button[type="submit"]').first.click()
# Verify the condition has been created.
data_switch, data_field = condition_id.split(',')
condition_css = (
'#id_example ' +
'[data-switch="{}"][data-field="{}"][data-value="{}"]'
)
condition_css = condition_css.format(
data_switch,
data_field,
data_value,
)
is_created = self.b.is_element_present_by_css(condition_css,
wait_time=10)
assert_true(is_created, 'Condition was not created')
# Set the proper status.
self.b.select('status_example', SELECTIVE)
# Ensure that the switch is off when condition is not met...
assert_switch_inactive(self.b, url=url + '?foo')
# ...and on when the condition is met.
assert_switch_active(self.b, url=url + '?' + data_value)
# Delete the condition.
self.b.visit(admin_url)
cond = self.b.find_by_css(condition_css).first
cond.find_by_css('a[href="#delete-condition"]').first.click()
is_deleted = self.b.is_element_not_present_by_css(condition_css,
wait_time=10)
assert_true(is_deleted, 'Condition was not deleted')
# Verify that the switch is no longer active.
assert_switch_inactive(self.b, url=url + '?test')
def test_add_edit_delete_switch(self):
self.b.visit(admin_url)
# Add the switch.
self.b.find_link_by_href('#add-switch').first.click()
drawer = self.b.find_by_css('.drawer').first
assert_true(drawer.visible, 'Drawer is not visible')
key = 'test1'
drawer.find_by_css('input[name="key"]').first.fill(key)
drawer.find_by_css('a.submit-switch').first.click()
# Verify the addition.
is_added = self.b.is_element_present_by_css('#id_{}'.format(key),
wait_time=10)
assert_true(is_added, 'Switch was not added.')
assert_false(drawer.visible, 'Drawer is not hidden')
# Edit the switch.
self.show_switch_actions()
css = '#id_{} a[href="#edit-switch"]'.format(key)
self.b.find_by_css(css).first.click()
assert_true(drawer.visible, 'Drawer is not visible')
label = 'Foobar'
drawer.find_by_css('input[name="label"]').first.fill(label)
drawer.find_by_css('a.submit-switch').first.click()
# Verify the edit.
is_edited = self.b.is_text_present(label, wait_time=10)
assert_true(is_edited, 'Switch was not edited.')
assert_false(drawer.visible, 'Drawer is not hidden')
# Delete the switch.
self.show_switch_actions()
css = '#id_{} a[href="#delete-switch"]'.format(key)
self.b.find_by_css(css).first.click()
alert = self.b.get_alert()
alert.accept()
# Verify the deletion.
is_deleted = self.b.is_element_not_present_by_css('#id_{}'.format(key),
wait_time=10)
assert_true(is_deleted, 'Switch was not deleted.')
def show_switch_actions(self):
'''
Need to temporarily suspend show-on-hover in order to interact with
the links (can't click on links that are hidden).
'''
js = "$('.switches .actions').css('visibility', 'visible')"
self.b.execute_script(js)
|
kadams54/switchboard
|
example/tests.py
|
Python
|
apache-2.0
| 7,503
|
[
"VisIt"
] |
832412f3f710e1e7147fe6d7fe3457f9d2f13eae7398930192e764bbdabd3340
|
from toee import *
from utilities import *
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
game.global_flags[70] = 1
if (attachee.leader_get() != OBJ_HANDLE_NULL):
triggerer.begin_dialog( attachee, 350 ) ## otis in party
elif (game.global_vars[903] == 32 and attachee.map != 5051 and attachee.map != 5056):
triggerer.begin_dialog( attachee, 560 ) ## have attacked 3 or more farm animals with otis in party and not in nulb exterior or nulb smithy
elif ((triggerer.item_find(2202) != OBJ_HANDLE_NULL) or (triggerer.item_find(3008) != OBJ_HANDLE_NULL)):
triggerer.begin_dialog( attachee, 330 ) ## you have otis chainmail and-or longsword in inventory
elif ((game.quests[32].state == qs_completed) and (game.global_flags[74] == 0)):
triggerer.begin_dialog( attachee, 270 ) ## have completed bribery for profit quest and have not revealed otis secret
elif (game.quests[31].state == qs_completed):
triggerer.begin_dialog( attachee, 200 ) ## have completed a second trip for otis quest
elif (game.global_flags[73] == 1):
triggerer.begin_dialog( attachee, 120 ) ## otis has been to toee with you
elif (game.quests[63].state == qs_accepted):
triggerer.begin_dialog( attachee, 700 ) ## bribery for justice quest accepted
else:
triggerer.begin_dialog( attachee, 1 ) ## none of the above
return SKIP_DEFAULT
def san_dying( attachee, triggerer ):
if should_modify_CR( attachee ):
modify_CR( attachee, get_av_level() )
attachee.float_line(12014,triggerer)
if (attachee.leader_get() != OBJ_HANDLE_NULL):
game.global_vars[29] = game.global_vars[29] + 1
return RUN_DEFAULT
def san_enter_combat( attachee, triggerer ):
print "Otis Enter Combat"
if (triggerer.type == obj_t_pc):
print 'Triggered!!!'
leader = attachee.leader_get()
if (leader != OBJ_HANDLE_NULL):
leader.follower_remove(attachee)
elmo = find_npc_near( attachee, 8000)
if (elmo != OBJ_HANDLE_NULL):
attachee.float_line(380,triggerer)
leader = elmo.leader_get()
if (leader != OBJ_HANDLE_NULL):
leader.follower_remove(elmo)
elmo.attack(triggerer)
ProtectTheInnocent(attachee, triggerer)
return RUN_DEFAULT
def san_heartbeat( attachee, triggerer ):
#print "Otis heartbeat"
if (not game.combat_is_active()):
# if (game.global_vars[903] >= 3): #removed buggy animal farm scripting
# if (attachee != OBJ_HANDLE_NULL):
# leader = attachee.leader_get()
# if (leader != OBJ_HANDLE_NULL):
# leader.follower_remove(attachee)
# attachee.float_line(22000,triggerer)
if (game.global_flags[362] == 0):
for obj in game.obj_list_vicinity(attachee.location,OLC_PC):
if (is_safe_to_talk(attachee,obj)):
if ((obj.item_find(2202) != OBJ_HANDLE_NULL) or (obj.item_find(3008) != OBJ_HANDLE_NULL)):
obj.begin_dialog(attachee,330)
game.global_flags[362] = 1
return RUN_DEFAULT
if (game.global_flags[72] == 0):
#print "elmo script"
for obj in game.obj_list_vicinity(attachee.location,OLC_NPC):
if (obj.name == 8000):
#print "elmo found"
delPc = GetDelegatePc(attachee, 10)
if (delPc != OBJ_HANDLE_NULL):
delPc.begin_dialog(attachee,400)
return RUN_DEFAULT
if (game.global_flags[366] == 1):
leader = attachee.leader_get()
if (leader != OBJ_HANDLE_NULL):
attachee.turn_towards(leader)
attachee.float_line(12023,leader)
leader.follower_remove(attachee)
attachee.attack(leader)
game.global_flags[366] = 0
return RUN_DEFAULT
if (game.global_flags[367] == 1):
leader = attachee.leader_get()
if (leader != OBJ_HANDLE_NULL):
attachee.turn_towards(leader)
attachee.float_line(10014,leader)
leader.follower_remove(attachee)
game.global_flags[367] = 0
return RUN_DEFAULT
return RUN_DEFAULT
def san_join( attachee, triggerer ):
if (attachee.map == 5051):
for chest in game.obj_list_vicinity(attachee.location,OLC_CONTAINER):
if (chest.name == 1202):
chest.item_transfer_to( attachee, 2202 )
chest.item_transfer_to( attachee, 3008 )
chest.item_transfer_to( attachee, 12038 )
chest.item_transfer_to( attachee, 12040 )
reg_warhammer = attachee.item_find(4077)
if (reg_warhammer != OBJ_HANDLE_NULL):
reg_warhammer.destroy()
reg_chainmail = attachee.item_find(6019)
if (reg_chainmail != OBJ_HANDLE_NULL):
reg_chainmail.destroy()
attachee.item_wield_best_all()
mag_longsword = attachee.item_find(2202)
if mag_longsword != OBJ_HANDLE_NULL:
mag_longsword.item_flag_set(OIF_NO_TRANSFER)
mag_chainmail = attachee.item_find(3008)
if mag_chainmail != OBJ_HANDLE_NULL:
mag_chainmail.item_flag_set(OIF_NO_TRANSFER)
        blu_sapph = attachee.item_find(12038)
        if blu_sapph != OBJ_HANDLE_NULL:
            blu_sapph.item_flag_set(OIF_NO_TRANSFER)
        amber = attachee.item_find(12040)
        if amber != OBJ_HANDLE_NULL:
            amber.item_flag_set(OIF_NO_TRANSFER)
else:
itemA = attachee.item_find(4077)
if (itemA != OBJ_HANDLE_NULL):
itemA.destroy()
itemB = attachee.item_find(6019)
if (itemB != OBJ_HANDLE_NULL):
itemB.destroy()
mag_sword = create_item_in_inventory( 4122, attachee )
mag_sword.item_flag_set(OIF_NO_TRANSFER)
mag_armor = create_item_in_inventory( 6102, attachee )
mag_armor.item_flag_set(OIF_NO_TRANSFER)
blu_sapp1 = create_item_in_inventory( 12038, attachee )
blu_sapp1.item_flag_set(OIF_NO_TRANSFER)
blu_sapp2 = create_item_in_inventory( 12038, attachee )
blu_sapp2.item_flag_set(OIF_NO_TRANSFER)
amber1 = create_item_in_inventory( 12040, attachee )
amber1.item_flag_set(OIF_NO_TRANSFER)
amber2 = create_item_in_inventory( 12040, attachee )
amber2.item_flag_set(OIF_NO_TRANSFER)
amber3 = create_item_in_inventory( 12040, attachee )
amber3.item_flag_set(OIF_NO_TRANSFER)
amber4 = create_item_in_inventory( 12040, attachee )
amber4.item_flag_set(OIF_NO_TRANSFER)
amber5 = create_item_in_inventory( 12040, attachee )
amber5.item_flag_set(OIF_NO_TRANSFER)
attachee.item_wield_best_all()
return RUN_DEFAULT
def san_disband( attachee, triggerer ):
for obj in triggerer.group_list():
if (obj.name == 8021):
triggerer.follower_remove(obj)
if (obj.name == 8022):
triggerer.follower_remove(obj)
for pc in game.party:
attachee.ai_shitlist_remove( pc )
attachee.reaction_set( pc, 50 )
return RUN_DEFAULT
def san_new_map( attachee, triggerer ):
if ((attachee.map == 5066) or (attachee.map == 5067) or (attachee.map == 5105) or (attachee.map == 5079) or (attachee.map == 5080)):
game.global_flags[73] = 1
if (game.quests[31].state == qs_accepted):
game.quests[31].state = qs_completed
elif ((attachee.map == 5062) or (attachee.map == 5112)):
game.global_flags[73] = 1
leader = attachee.leader_get()
if (leader != OBJ_HANDLE_NULL):
if ((leader.stat_level_get(stat_alignment) == LAWFUL_EVIL) or (leader.stat_level_get(stat_alignment) == CHAOTIC_EVIL) or (leader.stat_level_get(stat_alignment) == NEUTRAL_EVIL)):
percent = group_percent_hp(leader)
if ((percent < 30) or (game.global_flags[74] == 1)):
game.global_flags[366] = 1
elif (attachee.map == 5051):
if (((game.global_flags[73] == 1) and (game.quests[31].state == qs_unknown)) or (game.quests[31].state == qs_completed)):
leader = attachee.leader_get()
if (leader != OBJ_HANDLE_NULL):
game.global_flags[367] = 1
return RUN_DEFAULT
def make_elmo_talk( attachee, triggerer, line):
npc = find_npc_near(attachee,8000)
if (npc != OBJ_HANDLE_NULL):
triggerer.begin_dialog(npc,line)
npc.turn_towards(attachee)
attachee.turn_towards(npc)
else:
triggerer.begin_dialog(attachee,410)
return SKIP_DEFAULT
def make_saduj_talk( attachee, triggerer, line):
npc = find_npc_near(attachee,14689)
if (npc != OBJ_HANDLE_NULL):
triggerer.begin_dialog(npc,line)
npc.turn_towards(attachee)
attachee.turn_towards(npc)
return SKIP_DEFAULT
def talk_to_screng( attachee, triggerer, line):
npc = find_npc_near(attachee,8021)
if (npc != OBJ_HANDLE_NULL):
triggerer.begin_dialog(npc,line)
npc.turn_towards(attachee)
attachee.turn_towards(npc)
else:
triggerer.begin_dialog(attachee,470)
return SKIP_DEFAULT
def make_lila_talk( attachee, triggerer, line):
npc = find_npc_near(attachee,14001)
if (npc != OBJ_HANDLE_NULL):
triggerer.begin_dialog(npc,line)
npc.turn_towards(attachee)
attachee.turn_towards(npc)
else:
triggerer.begin_dialog(attachee,610)
return SKIP_DEFAULT
def switch_to_thrommel( attachee, triggerer):
npc = find_npc_near(attachee,8031)
if (npc != OBJ_HANDLE_NULL):
triggerer.begin_dialog(npc,40)
npc.turn_towards(attachee)
attachee.turn_towards(npc)
else:
triggerer.begin_dialog(attachee,510)
return SKIP_DEFAULT
def chain_it( attachee, triggerer ):
itemA = attachee.item_find(2202)
if (itemA != OBJ_HANDLE_NULL):
itemA.item_flag_unset(OIF_NO_TRANSFER)
itemB = attachee.item_find(3008)
if (itemB != OBJ_HANDLE_NULL):
itemB.item_flag_unset(OIF_NO_TRANSFER)
return
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/co8fixes/scr/py00097otis.py
|
Python
|
mit
| 8,857
|
[
"Amber"
] |
aea5d95d26c280a01a87027956e287a8c7a0f930dfe5cd60dddb5461fd1022ce
|
"""A grid plane component.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2006, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Enum, Int, Range
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from apptools.persistence import state_pickler
# Local imports.
from mayavi.core.component import Component
from mayavi.core.common import error
def _get_extent(inp):
"""Get the extents from the given input.
"""
d = inp.dimensions
return [0, d[0]-1, 0, d[1]-1, 0, d[2]-1]
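# For example, an input with dimensions (10, 20, 5) yields the extents
# [0, 9, 0, 19, 0, 4].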
######################################################################
# `GridPlane` class.
######################################################################
class GridPlane(Component):
# The version of this class. Used for persistence.
__version__ = 0
# The TVTK object that extracts the grid plane. This is created
# dynamically based on the input data type.
plane = Instance(tvtk.Object)
# The axis which is normal to the plane chosen.
axis = Enum('x', 'y', 'z',
desc='specifies the axis normal to the grid plane')
# The position of the grid plane.
position = Range(value=0, low='_low', high='_high',
enter_set=True, auto_set=False)
########################################
# Private traits.
# Determines the lower limit of the position trait and is always 0.
_low = Int(0)
# Determines the upper limit of the position trait. The value is
# dynamically set depending on the input data and state of the
# axis trait. The default is some large value to avoid errors in
# cases where the user may set the position before adding the
# object to the mayavi tree.
_high = Int(10000)
########################################
# View related traits.
# The View for this object.
view = View(Group(Item(name='axis'),
Item(name='position', enabled_when='_high > 0'))
)
######################################################################
# `object` interface
######################################################################
def __get_pure_state__(self):
d = super(GridPlane, self).__get_pure_state__()
# These traits are dynamically created.
for name in ('plane', '_low', '_high'):
d.pop(name, None)
return d
def __set_pure_state__(self, state):
state_pickler.set_state(self, state)
self._position_changed(self.position)
######################################################################
# `Component` interface
######################################################################
def setup_pipeline(self):
"""Override this method so that it *creates* its tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters.
"""
pass
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when the input fires a
`pipeline_changed` event.
"""
if len(self.inputs) == 0:
return
input = self.inputs[0].outputs[0]
plane = None
if input.is_a('vtkStructuredGrid'):
plane = tvtk.StructuredGridGeometryFilter()
        elif input.is_a('vtkStructuredPoints') or input.is_a('vtkImageData'):
            plane = tvtk.ImageDataGeometryFilter()
        elif input.is_a('vtkRectilinearGrid'):
            plane = tvtk.RectilinearGridGeometryFilter()
else:
msg = "The GridPlane component does not support the %s dataset."\
%(input.class_name)
error(msg)
            raise TypeError(msg)
self.configure_connection(plane, self.inputs[0])
self.plane = plane
self.plane.update()
self.outputs = [plane.output]
self._update_limits()
self._update_extents()
# If the data is 2D make sure that we default to the
# appropriate axis.
extents = list(_get_extent(input))
diff = [y-x for x, y in zip(extents[::2], extents[1::2])]
if diff.count(0) > 0:
self.axis = ['x', 'y', 'z'][diff.index(0)]
def update_data(self):
"""Override this method to do what is necessary when upstream
data changes.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
self._update_limits()
self._update_extents()
# Propagate the data_changed event.
self.data_changed = True
def has_output_port(self):
""" The filter has an output port."""
return True
def get_output_object(self):
""" Returns the output port."""
return self.plane.output_port
######################################################################
# Non-public methods.
######################################################################
def _get_axis_index(self):
return {'x':0, 'y':1, 'z':2}[self.axis]
def _update_extents(self):
inp = self.plane.input
extents = list(_get_extent(inp))
pos = self.position
axis = self._get_axis_index()
extents[2*axis] = pos
extents[2*axis+1] = pos
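        # Collapse the extent along the chosen axis to a single index, e.g.
        # axis='y', position=3 on extents [0, 9, 0, 19, 0, 4] gives
        # [0, 9, 3, 3, 0, 4] -- a plane one cell thick.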
try:
self.plane.set_extent(extents)
except AttributeError:
self.plane.extent = extents
def _update_limits(self):
extents = _get_extent(self.plane.input)
axis = self._get_axis_index()
pos = min(self.position, extents[2*axis+1])
self._high = extents[2*axis+1]
return pos
def _axis_changed(self, val):
if len(self.inputs) == 0:
return
pos = self._update_limits()
if self.position == pos:
self._update_extents()
self.data_changed = True
else:
self.position = pos
def _position_changed(self, val):
if len(self.inputs) == 0:
return
self._update_extents()
self.data_changed = True
|
liulion/mayavi
|
mayavi/components/grid_plane.py
|
Python
|
bsd-3-clause
| 6,556
|
[
"Mayavi"
] |
8eecbb26a55a3416fb01ee8f1c06a23da012f9eec068cb767b2ae3dd4784e20f
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
import numpy as np
import shutil
from pymatgen.io.vaspio_set import MITVaspInputSet, MITHSEVaspInputSet, \
MPVaspInputSet, MITGGAVaspInputSet, MITNEBVaspInputSet,\
MPStaticVaspInputSet, MPNonSCFVaspInputSet, MITMDVaspInputSet,\
MPHSEVaspInputSet, MPBSHSEVaspInputSet, MPStaticDielectricDFPTVaspInputSet,\
MPOpticsNonSCFVaspInputSet
from pymatgen.io.vaspio.vasp_input import Poscar, Incar
from pymatgen import Specie, Lattice, Structure
from monty.json import MontyDecoder
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
dec = MontyDecoder()
class MITMPVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitparamset = MITVaspInputSet()
self.mitparamset_unsorted = MITVaspInputSet(sort_structure=False)
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
self.mitggaparam = MITGGAVaspInputSet()
self.mpstaticparamset = MPStaticVaspInputSet()
self.mpnscfparamsetu = MPNonSCFVaspInputSet(
{"NBANDS": 50}, mode="Uniform")
self.mpnscfparamsetl = MPNonSCFVaspInputSet(
{"NBANDS": 60}, mode="Line")
self.mphseparamset = MPHSEVaspInputSet()
self.mpbshseparamsetl = MPBSHSEVaspInputSet(mode="Line")
self.mpbshseparamsetu = MPBSHSEVaspInputSet(
mode="Uniform", added_kpoints=[[0.5, 0.5, 0.0]])
self.mpdielparamset = MPStaticDielectricDFPTVaspInputSet()
def test_get_poscar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
s_unsorted = self.mitparamset_unsorted.get_poscar(struct).structure
s_sorted = self.mitparamset.get_poscar(struct).structure
self.assertEqual(s_unsorted[0].specie.symbol, 'Fe')
self.assertEqual(s_sorted[0].specie.symbol, 'Mn')
def test_get_potcar_symbols(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
syms = self.paramset.get_potcar_symbols(struct)
self.assertEqual(syms, ['Fe_pv', 'P', 'O'])
syms = MPVaspInputSet(sort_structure=False).get_potcar_symbols(struct)
self.assertEqual(syms, ['P', 'Fe_pv', 'O'])
def test_false_potcar_hash(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe_pv'
self.assertRaises(ValueError, self.mitparamset.get_potcar, struct, check_hash=True)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe'
def test_lda_potcar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe"], coords)
p = MITVaspInputSet(potcar_functional="LDA").get_potcar(struct)
self.assertEqual(p.functional, 'LDA')
def test_get_nelect(self):
coords = [[0]*3, [0.5]*3, [0.75]*3]
lattice = Lattice.cubic(4)
s = Structure(lattice, ['Si', 'Si', 'Fe'], coords)
self.assertAlmostEqual(MITVaspInputSet().get_nelect(s), 16)
def test_get_incar(self):
incar = self.paramset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [5.3, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar = self.mitparamset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar_gga = self.mitggaparam.get_incar(self.struct)
self.assertNotIn("LDAU", incar_gga)
incar_static = self.mpstaticparamset.get_incar(self.struct)
self.assertEqual(incar_static["NSW"], 0)
incar_nscfl = self.mpnscfparamsetl.get_incar(self.struct)
self.assertEqual(incar_nscfl["NBANDS"], 60)
incar_nscfu = self.mpnscfparamsetu.get_incar(self.struct)
self.assertEqual(incar_nscfu["ISYM"], 0)
incar_hse = self.mphseparamset.get_incar(self.struct)
self.assertEqual(incar_hse['LHFCALC'], True)
self.assertEqual(incar_hse['HFSCREEN'], 0.2)
incar_hse_bsl = self.mpbshseparamsetl.get_incar(self.struct)
self.assertEqual(incar_hse_bsl['LHFCALC'], True)
self.assertEqual(incar_hse_bsl['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsl['NSW'], 0)
incar_hse_bsu = self.mpbshseparamsetu.get_incar(self.struct)
self.assertEqual(incar_hse_bsu['LHFCALC'], True)
self.assertEqual(incar_hse_bsu['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsu['NSW'], 0)
incar_diel = self.mpdielparamset.get_incar(self.struct)
self.assertEqual(incar_diel['IBRION'], 8)
self.assertEqual(incar_diel['LEPSILON'], True)
si = 14
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
#Silicon structure for testing.
latt = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(latt, [si, si], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn("LDAU", incar)
incar = self.mithseparamset.get_incar(self.struct)
self.assertTrue(incar['LHFCALC'])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn('LDAU', incar)
#check fluorides
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
self.assertEqual(incar['MAGMOM'], [5, 0.6])
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0])
#Make sure this works with species.
struct = Structure(lattice, ["Fe2+", "O2-"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
struct = Structure(lattice, ["Fe", "Mn"], coords,
site_properties={'magmom': (5.2, -4.5)})
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mpstaticparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mitparamset_unsorted.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5.2, -4.5])
struct = Structure(lattice, [Specie("Fe", 2, {'spin': 4.1}), "Mn"],
coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5, 4.1])
incar = self.mpnscfparamsetl.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [4, 3])
incar = self.mpnscfparamsetu.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[100, 0.6])
#sulfide vs sulfate test
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.25, 0.5, 0])
struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [1.9, 0])
#Make sure Matproject sulfides are ok.
self.assertNotIn('LDAUU', self.paramset.get_incar(struct))
self.assertNotIn('LDAUU', self.mpstaticparamset.get_incar(struct))
struct = Structure(lattice, ["Fe", "S", "O"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
#Make sure Matproject sulfates are ok.
self.assertEqual(self.paramset.get_incar(struct)['LDAUU'], [5.3, 0, 0])
self.assertEqual(self.mpnscfparamsetl.get_incar(struct)['LDAUU'],
[5.3, 0, 0])
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[10, -5, 0.6])
def test_optics(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
self.mpopticsparamset = MPOpticsNonSCFVaspInputSet.from_previous_vasp_run(
'{}/static_silicon'.format(test_dir), output_dir='optics_test_dir',
nedos=1145)
self.assertTrue(os.path.exists('optics_test_dir/CHGCAR'))
incar = Incar.from_file('optics_test_dir/INCAR')
self.assertTrue(incar['LOPTICS'])
self.assertEqual(incar['NEDOS'], 1145)
#Remove the directory in which the inputs have been created
shutil.rmtree('optics_test_dir')
def test_get_kpoints(self):
kpoints = self.paramset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mitparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpstaticparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[6, 6, 4]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpnscfparamsetl.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 140)
self.assertEqual(kpoints.style, 'Reciprocal')
kpoints = self.mpnscfparamsetu.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 168)
kpoints = self.mpbshseparamsetl.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 164)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.66006924)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.51780182)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.30173482)
kpoints = self.mpbshseparamsetu.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 25)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.0)
def test_get_all_vasp_input(self):
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], -5)
self.struct.make_supercell(4)
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], 0)
def test_to_from_dict(self):
self.mitparamset = MITVaspInputSet()
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
d = self.mitparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 4)
d = self.mitggaparam.as_dict()
v = dec.process_decoded(d)
self.assertNotIn("LDAUU", v.incar_settings)
d = self.mithseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.mphseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.paramset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 5.3)
d = self.userparamset.as_dict()
v = dec.process_decoded(d)
#self.assertEqual(type(v), MPVaspInputSet)
self.assertEqual(v.incar_settings["MAGMOM"],
{"Fe": 10, "S": -5, "Mn3+": 100})
class MITMDVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitmdparam = MITMDVaspInputSet(300, 1200, 10000)
def test_get_potcar_symbols(self):
syms = self.mitmdparam.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.mitmdparam.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 2.4e-5)
def test_get_kpoints(self):
kpoints = self.mitmdparam.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [(1, 1, 1)])
self.assertEqual(kpoints.style, 'Gamma')
def test_to_from_dict(self):
d = self.mitmdparam.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MITMDVaspInputSet)
self.assertEqual(v.incar_settings["TEBEG"], 300)
class MITNEBVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.vis = MITNEBVaspInputSet(nimages=10, hubbard_off=True)
def test_get_potcar_symbols(self):
syms = self.vis.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.vis.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 0.00005)
def test_get_kpoints(self):
kpoints = self.vis.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
def test_to_from_dict(self):
d = self.vis.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["IMAGES"], 10)
def test_write_inputs(self):
c1 = [[0.5] * 3, [0.9] * 3]
c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]
s1 = Structure(Lattice.cubic(5), ['Si', 'Si'], c1)
s2 = Structure(Lattice.cubic(5), ['Si', 'Si'], c2)
structs = []
for s in s1.interpolate(s2, 3, pbc=True):
structs.append(Structure.from_sites(s.sites,
to_unit_cell=True))
fc = self.vis._process_structures(structs)[2].frac_coords
self.assertTrue(np.allclose(fc, [[0.5]*3,[0.9, 1.033333, 1.0333333]]))
if __name__ == '__main__':
unittest.main()
|
Dioptas/pymatgen
|
pymatgen/io/tests/test_vaspio_set.py
|
Python
|
mit
| 16,493
|
[
"pymatgen"
] |
9ca90a755e867ebae8c4b4336629e62ef19f0b7227d10720031ba4b371fea60d
|
#!flask/bin/python
from app import app, models, db
from flask.ext.sqlalchemy import SQLAlchemy
import datetime
import os, json
from app.models import User
from app.models import Visits
from app.models import Beacon
import random
beacon1 = Beacon(beacon_identifier="Fake1", location="Google X Labs", picture='/static/images/google.jpg')
beacon2 = Beacon(beacon_identifier="Fake2", location="Google Cafeteria", picture='/static/images/googlecafe.jpg')
beacon3 = Beacon(beacon_identifier="Fake3", location="Aperture Laboratories", picture='/static/images/aperture.jpg')
real_beacon1 = Beacon(beacon_identifier="b9407f30-f5f8-466e-aff9-25556b57fe6d991", location="Geekdom SF", picture='/static/images/geekdom.jpg')
real_beacon2 = Beacon(beacon_identifier="b9407f30-f5f8-466e-aff9-25556b57fe6d992", location="Geekdom SF Presentation Room", picture='/static/images/hall.jpg')
db.session.add(real_beacon1)
db.session.add(real_beacon2)
db.session.add(beacon1)
db.session.add(beacon2)
db.session.add(beacon3)
admin_user = User(mail='admin@admin.com', name='Admin', role=1)
db.session.add(admin_user)
user1 = User(mail='sergey@google.com', name='Sergey Brin', picture='/static/images/sergey.jpg', role=0)
db.session.add(user1)
user2 = User(mail='larry@google.com', name='Larry Page', picture='/static/images/larry.jpg', role=0)
db.session.add(user2)
user3 = User(mail='ray@google.com', name='Ray Kurzweil', picture='/static/images/ray.jpg', role=0)
db.session.add(user3)
brett = User(mail='brett@linkedin.com', name='Brett Sun', picture='/static/images/brett.jpg', role=0)
db.session.add(brett)
tim = User(mail='tim@linkedin.com', name='Tim Pei', picture='/static/images/tim.jpg', role=0)
db.session.add(tim)
vlad = User(mail='vlad@linkedin.com', name='Vlad Lyubinets', picture='/static/images/vlad.jpg', role=0)
db.session.add(vlad)
db.session.commit()
# Google visits
#visit2 = Visits(user_id=user2.id, beacon_id=beacon1.id, time_entered=datetime.datetime(2014, 03, 2, 11, 45, 0), time_left=None)
#visit3 = Visits(user_id=user3.id, beacon_id=beacon3.id, time_entered=datetime.datetime(2014, 03, 2, 12, 45, 0), time_left=None)
#visit4 = Visits(user_id=user1.id, beacon_id=beacon1.id, time_entered=datetime.datetime(2014, 03, 1, 11, 30, 0), time_left=datetime.datetime(2014, 03, 1, 15, 30, 0))
#visit5 = Visits(user_id=user1.id, beacon_id=beacon2.id, time_entered=datetime.datetime(2014, 03, 1, 16, 10, 0), time_left=datetime.datetime(2014, 03, 1, 17, 0, 0))
#db.session.add(visit1)
#db.session.add(visit2)
#db.session.add(visit3)
#db.session.add(visit4)
#db.session.add(visit5)
for i in xrange(20):
visit = Visits(user_id=user1.id, beacon_id=beacon1.id, time_entered=datetime.datetime(2014, 02, i + 2, random.randint(9, 11), random.randint(10, 50), random.randint(1, 59)), time_left=datetime.datetime(2014, 02, i+2, random.randint(16, 18), random.randint(1, 55), random.randint(1, 59)))
db.session.add(visit)
db.session.commit()
if random.randint(1, 5) <= 2:
visit = Visits(user_id=user1.id, beacon_id=beacon2.id, time_entered=datetime.datetime(2014, 02, i + 2, random.randint(19, 20), random.randint(10, 20), random.randint(1, 59)), time_left=datetime.datetime(2014, 02, i+2, 20, random.randint(30, 55), random.randint(1, 59)))
db.session.add(visit)
db.session.commit()
if random.randint(1, 2) == 1:
visit = Visits(user_id=user2.id, beacon_id=beacon2.id, time_entered=datetime.datetime(2014, 02, i + 2, 8, random.randint(5, 20), random.randint(1, 59)), time_left=datetime.datetime(2014, 02, i+2, 8, random.randint(40, 55), random.randint(1, 59)))
db.session.add(visit)
db.session.commit()
visit = Visits(user_id=user2.id, beacon_id=beacon1.id, time_entered=datetime.datetime(2014, 02, i + 2, random.randint(9, 10), random.randint(10, 20), random.randint(1, 59)), time_left=datetime.datetime(2014, 02, i+2, random.randint(16, 17), random.randint(10, 55), random.randint(1, 59)))
db.session.add(visit)
db.session.commit()
if random.randint(1, 4) != 4:
visit = Visits(user_id=user2.id, beacon_id=beacon2.id, time_entered=datetime.datetime(2014, 02, i + 2, 18, random.randint(5, 20), random.randint(1, 59)), time_left=datetime.datetime(2014, 02, i+2, 18, random.randint(40, 55), random.randint(1, 59)))
db.session.add(visit)
db.session.commit()
if random.randint(1, 2) == 1:
visit = Visits(user_id=user3.id, beacon_id=beacon3.id, time_entered=datetime.datetime(2014, 02, i + 2, random.randint(11, 14), random.randint(5, 50), random.randint(1, 59)), time_left=datetime.datetime(2014, 02, i+2, random.randint(18, 19), random.randint(40, 55), random.randint(1, 59)))
db.session.add(visit)
db.session.commit()
# RackSpace visits
visit1 = Visits(user_id=tim.id, beacon_id=real_beacon1.id, time_entered=datetime.datetime(2014, 03, 1, 11, 25, 0), time_left=datetime.datetime(2014, 03, 1, 21, 30, 0))
visit21 = Visits(user_id=brett.id, beacon_id=real_beacon1.id, time_entered=datetime.datetime(2014, 03, 1, 12, 35, 0), time_left=datetime.datetime(2014, 03, 1, 13, 45, 0))
visit22 = Visits(user_id=brett.id, beacon_id=real_beacon2.id, time_entered=datetime.datetime(2014, 03, 1, 13, 52, 0), time_left=datetime.datetime(2014, 03, 1, 19, 10, 0))
visit23 = Visits(user_id=brett.id, beacon_id=real_beacon1.id, time_entered=datetime.datetime(2014, 03, 1, 19, 33, 0), time_left=datetime.datetime(2014, 03, 1, 21, 32, 0))
visit3 = Visits(user_id=vlad.id, beacon_id=real_beacon1.id, time_entered=datetime.datetime(2014, 03, 1, 11, 00, 0), time_left=datetime.datetime(2014, 03, 1, 21, 30, 0))
visit01 = Visits(user_id=tim.id, beacon_id=real_beacon1.id, time_entered=datetime.datetime(2014, 03, 2, 11, 25, 0), time_left=datetime.datetime(2014, 03, 2, 16, 30, 0))
visit02 = Visits(user_id=brett.id, beacon_id=real_beacon1.id, time_entered=datetime.datetime(2014, 03, 2, 11, 20, 0), time_left=datetime.datetime(2014, 03, 2, 16, 29, 0))
visit03 = Visits(user_id=vlad.id, beacon_id=real_beacon1.id, time_entered=datetime.datetime(2014, 03, 2, 11, 47, 0), time_left=datetime.datetime(2014, 03, 2, 16, 21, 0))
db.session.add(visit01)
db.session.add(visit02)
db.session.add(visit03)
visit4 = Visits(user_id=tim.id, beacon_id=real_beacon2.id, time_entered=datetime.datetime(2014, 03, 2, 16, 32, 0), time_left=None)
visit5 = Visits(user_id=brett.id, beacon_id=real_beacon2.id, time_entered=datetime.datetime(2014, 03, 2, 16, 33, 0), time_left=None)
visit6 = Visits(user_id=vlad.id, beacon_id=real_beacon2.id, time_entered=datetime.datetime(2014, 03, 2, 16, 34, 0), time_left=None)
db.session.add(visit1)
db.session.add(visit3)
db.session.add(visit21)
db.session.add(visit22)
db.session.add(visit23)
db.session.add(visit4)
db.session.add(visit5)
db.session.add(visit6)
#db.session.add(visit11)
#db.session.add(visit12)
#db.session.add(visit13)
#db.session.add(visit14)
#db.session.add(visit15)
#db.session.add(visit16)
visitg = Visits(user_id=user1.id, beacon_id=beacon1.id, time_entered=datetime.datetime(2014, 03, 2, 9, 30, 0), time_left=None)
db.session.add(visitg)
db.session.commit()
|
SohKai/ChronoLogger
|
web/db_populate.py
|
Python
|
mit
| 7,015
|
[
"VisIt"
] |
b66d4e84b217f12de7da6d05a94628d811ab7954f7d5598368e21b811dc5fba8
|
# -*- coding: utf-8 -*-
"""
Tests for bandwidth selection and calculation.
Author: Padarn Wilson
"""
import numpy as np
from scipy import stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
from statsmodels.nonparametric.bandwidths import select_bandwidth
from statsmodels.nonparametric.bandwidths import bw_normal_reference
from numpy.testing import assert_allclose
import pytest
# setup test data
np.random.seed(12345)
Xi = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
class TestBandwidthCalculation(object):
def test_calculate_bandwidth_gaussian(self):
bw_expected = [0.29774853596742024,
0.25304408155871411,
0.29781147113698891]
kern = kernels.Gaussian()
bw_calc = [0, 0, 0]
for ii, bw in enumerate(['scott','silverman','normal_reference']):
bw_calc[ii] = select_bandwidth(Xi, bw, kern)
assert_allclose(bw_expected, bw_calc)
def test_calculate_normal_reference_bandwidth(self):
# Should be the same as the Gaussian Kernel
bw_expected = 0.29781147113698891
bw = bw_normal_reference(Xi)
assert_allclose(bw, bw_expected)
class CheckNormalReferenceConstant(object):
def test_calculate_normal_reference_constant(self):
const = self.constant
kern = self.kern
assert_allclose(const, kern.normal_reference_constant, 1e-2)
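# The expected constants below are the canonical values of C in the
# normal-reference (rule-of-thumb) bandwidth h = C * A * n**(-1/5):
# roughly 2.34 for Epanechnikov, 1.06 for Gaussian, 2.78 for biweight,
# and 3.15 for triweight.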
class TestEpanechnikov(CheckNormalReferenceConstant):
kern = kernels.Epanechnikov()
constant = 2.34
class TestGaussian(CheckNormalReferenceConstant):
kern = kernels.Gaussian()
constant = 1.06
class TestBiweight(CheckNormalReferenceConstant):
kern = kernels.Biweight()
constant = 2.78
class TestTriweight(CheckNormalReferenceConstant):
kern = kernels.Triweight()
constant = 3.15
class BandwidthZero(object):
def test_bandwidth_zero(self):
kern = kernels.Gaussian()
for bw in ['scott', 'silverman', 'normal_reference']:
with pytest.raises(RuntimeError,
match="Selected KDE bandwidth is 0"):
select_bandwidth(self.xx, bw, kern)
class TestAllBandwidthZero(BandwidthZero):
xx = np.ones((100, 3))
class TestAnyBandwidthZero(BandwidthZero):
xx = np.random.normal(size=(100, 3))
xx[:, 0] = 1.0
|
statsmodels/statsmodels
|
statsmodels/nonparametric/tests/test_bandwidths.py
|
Python
|
bsd-3-clause
| 2,495
|
[
"Gaussian"
] |
f4fbd3c7a0346c20662fc3d13dfe98661f23bb0ff03f050b87c7835310933654
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Converts query results from SpaceCurve System into strict GeoJSON.
#
# for Python 2.7
#
#
# For help:
#
# python sc-to-geojson.py --help
#
# and visit: https://github.com/SpaceCurve/schema-discovery
#
# @copyright (C) SpaceCurve, Inc. 2015
import os
import re
import sys
import csv
from optparse import OptionParser
linecleaner = re.compile('\[\{.+\},(\{.+\})\]')
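# The pattern matches lines of the form '[{...},{...}]' (a two-element JSON
# array) and captures the second object, which is written out as a feature.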
def parseargs():
""" Parse command line arguments """
parser = OptionParser()
parser.add_option('-i', '--in', dest = "in_file", type = "string", default = None, help = "name of input file to process")
parser.add_option('-o', '--out', dest = "out_file", type = "string", default = None, help = "name of output file to write")
opt, arg = parser.parse_args()
    if not (opt.in_file and opt.out_file):
print "Error: must specify an --in and an --out file"
sys.exit(1)
return opt, arg
if __name__ == "__main__":
options, args = parseargs()
infh = open(options.in_file, 'r')
outfh = open(options.out_file, 'w')
outfh.write('{"type": "FeatureCollection", "features": [\r\n')
    features = []
    for line in infh.readlines():
        m = linecleaner.match(line)
        if m:
            features.append(m.groups()[0])
        else:
            features.append(line.rstrip())
    # Join with commas between features only, so the final feature carries no
    # trailing comma (a trailing comma would make the output invalid JSON).
    outfh.write(',\r\n'.join(features))
    outfh.write('\r\n] }')
|
SpaceCurve/schema-discovery
|
sc-to-geojson.py
|
Python
|
apache-2.0
| 1,373
|
[
"VisIt"
] |
d06bed7586eacddfaf5f7109c99e5a2fb256bf03c5c47fdd82342939faf448cc
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 07 11:09:10 2016
@author: Radu
"""
from neuron import h
from math import sin, cos
class BallAndStick(object):
"""Two-section cell: A soma with active channels and
a dendrite with passive properties."""
def __init__(self):
self.x = self.y = self.z = 0
self.create_sections()
self.build_topology()
self.build_subsets()
self.define_geometry()
self.define_biophysics()
#
def create_sections(self):
"""Create the sections of the cell."""
self.soma = h.Section(name='soma', cell=self)
self.dend = h.Section(name='dend', cell=self)
#
def build_topology(self):
"""Connect the sections of the cell to build a tree."""
self.dend.connect(self.soma(1))
#
def define_geometry(self):
"""Set the 3D geometry of the cell."""
self.soma.L = 35 # microns
self.soma.diam = 25 # microns
self.dend.L = 350 # microns
self.dend.diam = 3 # microns
self.dend.nseg = 9
self.shape_3D() #### Was h.define_shape(), now we do it.
#
def define_biophysics(self):
"""Assign the membrane properties across the cell."""
for sec in self.all: # 'all' exists in parent object.
sec.Ra = 70 # Axial resistance in Ohm * cm
sec.cm = 1 # Membrane capacitance in micro Farads / cm^2
# Insert active Hodgkin-Huxley current in the soma
self.dap_syn_ = h.Exp2Syn(self.soma(0.5))
self.dap_syn_.tau1 = 2
self.dap_syn_.tau2 = 5
self.dap_syn_.e = 50
self.dap_nc_ = h.NetCon(self.soma(0.5)._ref_v,\
self.dap_syn_, sec=self.soma)
self.dap_nc_.delay = 0
self.dap_nc_.threshold = 10
self.dend_syn= h.Exp2Syn(self.dend(0.5))
self.dend_syn.tau1 = 2
self.dend_syn.tau2 = 5
self.dend_syn.e = 0
self.soma.insert('pas')
# FINAL VERSIONS
self.soma.g_pas = 2e-3
self.soma.e_pas = -66
# SWEEP VALUES
#self.soma.gcaN_clarke = 0
#self.soma.gcaL_clarke = 0
#self.soma.gcak_clarke = 0
#self.soma.gnapbar_clarke = 0
#self.soma.tau_mc_clarke = 0
#self.soma.tau_hc_clarke = 0
#self.soma.tau_n_bar_clarke = 0
#self.dap_nc_.weight[0] = 0
#self.soma.gkrect_clarke = 0
#self.soma.gnabar_clarke = 0
#self.soma.insert('pas')
#self.soma.g_pas = 1e-3 # Passive conductance in S/cm2
#self.soma.e_pas = -64 # Leak reversal potential mV
#self.soma.insert('extracellular')
# Insert passive current in the dendrite
self.dend.insert('pas')
self.dend.g_pas = 0.001 # Passive conductance in S/cm2
self.dend.e_pas = -54.3 # Leak reversal potential mV
#
def build_subsets(self):
"""Build subset lists. For now we define 'all'."""
self.all = h.SectionList()
self.all.wholetree(sec=self.soma)
#
#### NEW STUFF ADDED ####
#
def shape_3D(self):
"""
Set the default shape of the cell in 3D coordinates.
Set soma(0) to the origin (0,0,0) and dend extending along
the X-axis.
"""
len1 = self.soma.L
h.pt3dclear(sec=self.soma)
h.pt3dadd(0, 0, 0, self.soma.diam, sec=self.soma)
h.pt3dadd(len1, 0, 0, self.soma.diam, sec=self.soma)
len2 = self.dend.L
h.pt3dclear(sec=self.dend)
h.pt3dadd(len1, 0, 0, self.dend.diam, sec=self.dend)
h.pt3dadd(len1 + len2, 0, 0, self.dend.diam, sec=self.dend)
#
def set_position(self, x, y, z):
"""
Set the base location in 3D and move all other
parts of the cell relative to that location.
"""
for sec in self.all:
# note: iterating like this changes the context for all NEURON
# functions that depend on a section, so no need to specify sec=
for i in range(int(h.n3d())):
h.pt3dchange(i,
x - self.x + h.x3d(i),
y - self.y + h.y3d(i),
z - self.z + h.z3d(i),
h.diam3d(i))
self.x, self.y, self.z = x, y, z
#
def rotateZ(self, theta):
"""Rotate the cell about the Z axis."""
for sec in self.all:
for i in range(2):
                # Standard 2-D rotation about the z-axis.
                x = h.x3d(i) * cos(theta) - h.y3d(i) * sin(theta)
                y = h.x3d(i) * sin(theta) + h.y3d(i) * cos(theta)
h.pt3dchange(i, x, y, h.z3d(i), h.diam3d(i))
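# A minimal usage sketch (hypothetical values, assuming NEURON's h is set up
# as imported above):
#
#   cell = BallAndStick()
#   cell.set_position(100, 0, 0)  # move the whole cell 100 um along x
#   cell.rotateZ(0.5)             # rotate the cell about the z-axis by 0.5 rad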
|
penguinscontrol/Spinal-Cord-Modeling
|
ClarkesNetwork/ballandstick_passive.py
|
Python
|
gpl-2.0
| 4,811
|
[
"NEURON"
] |
4a61baafae40e72944278c713082f5e8cd18eef2d0cb64cc1b16a95eadf2a4ae
|
from math import *
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# 20cm translation distance
dx=0.2
dy=0.35
trans = sqrt(dx*dx + dy*dy)
# rotation1 = 30 degree
theta1 = 30.0*pi/180.0
# rotation2 = 45 degree
theta2 = 10.0*pi/180.0
rot1 = atan2(dy, dx) - theta1
rot2 = theta2-theta1-rot1
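# Odometry decomposition (rot1-trans-rot2): the robot first turns by rot1 to
# face the translation direction, drives the straight-line distance trans,
# then turns by rot2, so that rot1 + rot2 equals the net heading change
# theta2 - theta1.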
a1 = 0.05
a2 = 15.0*pi/180.0
a3 = 0.05
a4 = 0.01
sd_rot1 = a1*abs(rot1) + a2*trans
sd_rot2 = a1*abs(rot2) + a2*trans
sd_trans = a3*trans + a4*(abs(rot1) + abs(rot2))
x= []
y = []
for i in range(0, 1000):
t = trans + np.random.normal(0,sd_trans*sd_trans)
r1 = rot1 + np.random.normal(0, sd_rot1*sd_rot1)
r2 = rot2 + np.random.normal(0, sd_rot2*sd_rot2)
x.append(t*cos(theta1+r1))
y.append(t*sin(theta1+r1))
#matplotlib.rcParams['axes.unicode_minus'] = False
fig, ax = plt.subplots()
#ax.plot(x, y)
ax.scatter(x, y)
ax.set_title('Gaussian noise of motion model with dx=0.2 and dy=0.35')
plt.show()
|
CARMinesDouai/MultiRobotExplorationPackages
|
noisy_odometry/scripts/view_distribution.py
|
Python
|
mit
| 933
|
[
"Gaussian"
] |
aaaf3c0a91fd1609f18523f83f64c7ccd5e88f8bc9462f95181c30022bc5b225
|
#!/usr/bin/env python
# The glassbrain class copied from The NeuroImaging Analysis Framework (NAF) repositories
# The code is covered under GNU GPL v2.
# Usage example.
'''
brain = ConnecBrain("fsaverage", "lh", "inflated")
coords = np.array([[-27., 23., 48.],
[-41.,-60., 29.],
[-64., -20., -9.],
[ -7., 49., 18.],
[ -7., -52., 26.]])
labels = ['MFG','AG','MTG','PCC','MPFC']
brain.add_coords(coords, color='green', labels=labels, scale_factor=1)
brain.add_arrow(coords[:2,:], color='red')
mlab.view(45,135)
# Note: If used in a jumeg module, use this below import statements.
# try:
# import glassbrain
# except Exception as e:
# print ('Unable to import glassbrain check mayavi and pysurfer config.')
'''
import numpy as np
from matplotlib.colors import colorConverter
from mayavi import mlab
from mayavi.mlab import pipeline as mp
import surfer
class ConnecBrain(surfer.Brain):
"""
Subclass of sufer.Brain which allows adding co-ordinates and arrows
to denote directional connectivity estimates
"""
def __init__(self, subject_id, hemi, surf='inflated', curv=True,
title=None, config_opts={}, figure=None, subjects_dir=None,
views=['lat'], show_toolbar=False, offscreen=False,
opacity=0.3):
# Call our main constructor
surfer.Brain.__init__(self, subject_id, hemi, surf, views=views, curv=curv,
config_opts=config_opts, subjects_dir=subjects_dir)
#surfer.Brain.__init__(self, subject_id, hemi, surf, curv, title,
# config_opts, figure, subjects_dir,
# views, show_toolbar, offscreen)
# Initialise our arrows dictionary
self.arrows_dict = dict()
# Set all brain opacities
for b in self._brain_list:
b['brain']._geo_surf.actor.property.opacity = opacity
def arrows(self):
"""Wrap to arrows"""
return self._get_one_brain(self.arrows_dict, 'arrows')
def add_coords(self, coords, map_surface=None, scale_factor=1.5,
color="red", alpha=1, name=None, labels=None, hemi=None,
text_size=5, txt_pos=[1.4, 1.1, 1.1]):
"""
Plot locations onto the brain surface as spheres.
:param coords: list of co-ordinates or (n, 3) numpy array. Co-ordinate
space must match that of the underlying MRI image
:param map_surface: Freesurfer surf or None.
surface to map coordinates through, or None to use raw coords
:param scale_factor: int
controls the size of the foci spheres
:param color: matplotlib color code
HTML name, RGB tuple or hex code
:param alpha: float in [0, 1]
opacity of coordinate spheres
:param name: str
internal name to use (_foci and _labels will be appended)
:param labels:
List of text strings used to label co-ordinates
:param hemi: str | None
If None, assumed to belong to the hemisphere being shown.
            If two hemispheres are being shown, an error will be thrown
:param text_size: int
Text size of labels
"""
hemi = self._check_hemi(hemi)
if map_surface is None:
foci_vtxs = surfer.utils.find_closest_vertices(self.geo[hemi].coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
else:
foci_surf = surfer.utils.Surface(self.subject_id, hemi, map_surface,
subjects_dir=self.subjects_dir)
foci_surf.load_geometry()
foci_vtxs = surfer.utils.find_closest_vertices(foci_surf.coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
# Convert the color code
if not isinstance(color, tuple):
color = colorConverter.to_rgb(color)
if name is None:
name = "coords_%s" % (max(len(self.foci_dict) + 1,
len(self.labels_dict) + 1))
views = self._toggle_render(False)
# Store the coords in the foci list and the label in the labels list
fl = []
# Create the visualization
for brain in self._brain_list:
if brain['hemi'] == hemi:
fl.append(mlab.points3d(foci_coords[:, 0],
foci_coords[:, 1],
foci_coords[:, 2],
np.ones(foci_coords.shape[0]),
scale_factor=(10. * scale_factor),
color=color, opacity=alpha,
name=name + '_foci',
figure=brain['brain']._f))
self.foci_dict[name + '_foci'] = fl
if labels is not None:
tl = []
for i in range(coords.shape[0]):
tl.append(mlab.text3d(foci_coords[i, 0]*txt_pos[0],
foci_coords[i, 1]*txt_pos[1],
foci_coords[i, 2]*txt_pos[2],
labels[i],
color=(1.0, 1.0, 1.0),
scale=text_size,
name=name + '_label',
figure=brain['brain']._f))
            self.labels_dict[name + '_label'] = tl
self._toggle_render(True, views)
def add_arrow(self, coords, map_surface=None, tube_radius=3.0,
color="white", alpha=1, name=None, hemi=None):
"""
Add an arrow across the brain between two co-ordinates
:param coords: list of co-ordinates or (n, 3) numpy array. Co-ordinate
space must match that of the underlying MRI image
:param tube_radius: float
controls the size of the arrow
:param color: matplotlib color code
HTML name, RGB tuple or hex code
:param alpha: float in [0, 1]
opacity of coordinate spheres
:param name: str
internal name to use
:param hemi: str | None
If None, assumed to belong to the hemisphere being shown.
            If two hemispheres are being shown, an error will be thrown
"""
hemi = self._check_hemi(hemi)
if map_surface is None:
foci_vtxs = surfer.utils.find_closest_vertices(self.geo[hemi].coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
else:
foci_surf = surfer.utils.Surface(self.subject_id, hemi, map_surface,
subjects_dir=self.subjects_dir)
foci_surf.load_geometry()
foci_vtxs = surfer.utils.find_closest_vertices(foci_surf.coords, coords)
foci_coords = self.geo[hemi].coords[foci_vtxs]
# foci_vtxs = surfer.utils.find_closest_vertices(self.geo[hemi].coords, coords)
# foci_coords = self.geo[hemi].coords[foci_vtxs]
# Convert the color code
if not isinstance(color, tuple):
color = colorConverter.to_rgb(color)
if name is None:
name = "arrow_%s" % (len(self.arrows_dict) + 1)
nsegs = 100
x = np.linspace(foci_coords[0, 0], foci_coords[1, 0], nsegs)
y = np.linspace(foci_coords[0, 1], foci_coords[1, 1], nsegs)
z = np.linspace(foci_coords[0, 2], foci_coords[1, 2], nsegs)
line_coords = np.vstack((x, y, z)).transpose()
step = 5
idx_a = list(range(0, nsegs+1, step))
idx_b = list(range(10, nsegs+1, step))
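        # Draw the arrow as overlapping tube segments whose radius tapers
        # from tube_radius down to about 0.5 along the line, giving a visual
        # cue of direction from the first coordinate to the second.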
views = self._toggle_render(False)
al = []
for brain in self._brain_list:
if brain['hemi'] == hemi:
for start,end in zip(idx_a, idx_b):
seg_width = tube_radius - (start*(tube_radius-.5)/100.)
al.append(mlab.plot3d(line_coords[start:end, 0],
line_coords[start:end, 1],
line_coords[start:end, 2],
np.ones_like(line_coords[start:end, 0]),
color=color, opacity=alpha,
tube_radius=seg_width,
name=name,
figure=brain['brain']._f))
self.arrows_dict[name] = al
self._toggle_render(True, views)
|
fboers/jumeg
|
jumeg/glassbrain.py
|
Python
|
bsd-3-clause
| 8,626
|
[
"Mayavi"
] |
8e21e8e4c09b096f35392644d798a2b4472c0bf049665e616343f7d9c67278e1
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range
from future.utils import viewitems
import six
import itertools
import math
import re
import collections
import copy
import numbers
import textwrap
from contextlib import contextmanager
import numpy as np
from scipy.spatial.distance import hamming
import pandas as pd
from skbio._base import SkbioObject
from skbio.sequence._base import ElasticLines
from skbio.util._misc import chunk_str
from skbio.util._decorator import stable, experimental
class Sequence(collections.Sequence, SkbioObject):
"""Store biological sequence data and optional associated metadata.
``Sequence`` objects do not enforce an alphabet and are thus the most
generic objects for storing biological sequence data. Subclasses ``DNA``,
``RNA``, and ``Protein`` enforce the IUPAC character set [1]_ for, and
provide operations specific to, each respective molecule type.
``Sequence`` objects consist of the underlying sequence data, as well
as optional metadata and positional metadata. The underlying sequence
    is immutable, while the metadata and positional metadata are mutable.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the biological sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence. A shallow copy
of the ``dict`` will be made (see Examples section below for details).
positional_metadata : pd.DataFrame consumable, optional
Arbitrary per-character metadata (e.g., sequence read quality
scores). Must be able to be passed directly to ``pd.DataFrame``
constructor. Each column of metadata must be the same length as the
biological sequence. A shallow copy of the positional metadata will be
made if necessary (see Examples section below for details).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters. If ``False``, no characters will be converted.
If a str, it will be treated as a key into the positional metadata of
the object. All lowercase characters will be converted to uppercase,
and a ``True`` value will be stored in a boolean array in the
positional metadata under the key.
Attributes
----------
values
metadata
positional_metadata
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
**Creating sequences:**
Create a sequence without any metadata:
>>> seq = Sequence('GGUCGUGAAGGA')
>>> seq
Sequence
---------------
Stats:
length: 12
---------------
0 GGUCGUGAAG GA
Create a sequence with metadata and positional metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> positional_metadata = {'quality': [3, 3, 4, 10],
... 'exons': [True, True, False, True]}
>>> seq = Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'seq-id'
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
**Retrieving underlying sequence data:**
Retrieve underlying sequence:
>>> seq.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'C', b'G', b'T'],
dtype='|S1')
Underlying sequence immutable:
>>> seq.values = np.array([b'T', b'C', b'G', b'A'], dtype='|S1')
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> seq.values[0] = b'T'
Traceback (most recent call last):
...
ValueError: assignment destination is read-only
**Retrieving sequence metadata:**
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 4
3 True 10
**Updating sequence metadata:**
.. warning:: Be aware that a shallow copy of ``metadata`` and
``positional_metadata`` is made for performance. Since a deep copy is
not made, changes made to mutable Python objects stored as metadata may
affect the metadata of other ``Sequence`` objects or anything else that
shares a reference to the object. The following examples illustrate this
behavior.
First, let's create a sequence and update its metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> seq = Sequence('ACGT', metadata=metadata)
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'new-id', 'pubmed': 12345}
Note that the original metadata dictionary (stored in variable
``metadata``) hasn't changed because a shallow copy was made:
>>> pprint(metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
>>> seq.metadata == metadata
False
Note however that since only a *shallow* copy was made, updates to mutable
objects will also change the original metadata dictionary:
>>> seq.metadata['authors'].append('Bob')
>>> seq.metadata['authors']
['Alice', 'Bob']
>>> metadata['authors']
['Alice', 'Bob']
This behavior can also occur when manipulating a sequence that has been
derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'new-id'
'pubmed': 12345
Stats:
length: 2
-----------------------------
0 CG
>>> pprint(subseq.metadata)
{'authors': ['Alice', 'Bob'],
'desc': 'seq desc',
'id': 'new-id',
'pubmed': 12345}
The subsequence has inherited the metadata of its parent sequence. If we
update the subsequence's author list, we see the changes propagated in the
parent sequence and original metadata dictionary:
>>> subseq.metadata['authors'].append('Carol')
>>> subseq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> seq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> metadata['authors']
['Alice', 'Bob', 'Carol']
The behavior for updating positional metadata is similar. Let's create a
new sequence with positional metadata that is already stored in a
``pd.DataFrame``:
>>> positional_metadata = pd.DataFrame(
... {'quality': [3, 3, 4, 10], 'list': [[], [], [], []]})
>>> seq = Sequence('ACGT', positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
>>> seq.positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
Now let's update the sequence's positional metadata by adding a new column
and changing a value in another column:
>>> seq.positional_metadata['gaps'] = [False, False, False, False]
>>> seq.positional_metadata.loc[0, 'quality'] = 999
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [] 3 False
2 [] 4 False
3 [] 10 False
Note that the original positional metadata (stored in variable
``positional_metadata``) hasn't changed because a shallow copy was made:
>>> positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
>>> seq.positional_metadata.equals(positional_metadata)
False
Next let's create a sequence that has been derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
'gaps': <dtype: bool>
Stats:
length: 2
-----------------------------
0 CG
>>> subseq.positional_metadata
list quality gaps
0 [] 3 False
1 [] 4 False
As described above for metadata, since only a *shallow* copy was made of
the positional metadata, updates to mutable objects will also change the
parent sequence's positional metadata and the original positional metadata
``pd.DataFrame``:
>>> subseq.positional_metadata.loc[0, 'list'].append('item')
>>> subseq.positional_metadata
list quality gaps
0 [item] 3 False
1 [] 4 False
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [item] 3 False
2 [] 4 False
3 [] 10 False
>>> positional_metadata
list quality
0 [] 3
1 [item] 3
2 [] 4
3 [] 10
"""
# ASCII is built such that the difference between uppercase and lowercase
# is the 6th bit.
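    # For example, ord('A') == 65 and ord('a') == 97; 65 ^ 32 == 97, so
    # XOR-ing a byte with 32 flips its case.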
_ascii_invert_case_bit_offset = 32
_ascii_lowercase_boundary = 90
default_write_format = 'fasta'
__hash__ = None
@property
@stable(as_of="0.4.0")
def values(self):
"""Array containing underlying sequence characters.
Notes
-----
This property is not writeable.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AACGA')
>>> s.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'A', b'C', b'G', b'A'],
dtype='|S1')
"""
return self._bytes.view('|S1')
@property
@stable(as_of="0.4.0")
def metadata(self):
"""``dict`` containing metadata which applies to the entire sequence.
Notes
-----
This property can be set and deleted.
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
Create a sequence with metadata:
>>> s = Sequence('ACGTACGTACGTACGT',
... metadata={'id': 'seq-id',
... 'description': 'seq description'})
>>> s
Sequence
------------------------------------
Metadata:
'description': 'seq description'
'id': 'seq-id'
Stats:
length: 16
------------------------------------
0 ACGTACGTAC GTACGT
Retrieve metadata:
>>> pprint(s.metadata) # using pprint to display dict in sorted order
{'description': 'seq description', 'id': 'seq-id'}
Update metadata:
>>> s.metadata['id'] = 'new-id'
>>> s.metadata['pubmed'] = 12345
>>> pprint(s.metadata)
{'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
Set metadata:
>>> s.metadata = {'abc': 123}
>>> s.metadata
{'abc': 123}
Delete metadata:
>>> s.has_metadata()
True
>>> del s.metadata
>>> s.metadata
{}
>>> s.has_metadata()
False
"""
if self._metadata is None:
# not using setter to avoid copy
self._metadata = {}
return self._metadata
@metadata.setter
def metadata(self, metadata):
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict")
# shallow copy
self._metadata = metadata.copy()
@metadata.deleter
def metadata(self):
self._metadata = None
@property
@stable(as_of="0.4.0")
def positional_metadata(self):
"""``pd.DataFrame`` containing metadata on a per-character basis.
Notes
-----
This property can be set and deleted.
Examples
--------
Create a DNA sequence with positional metadata:
>>> from skbio import DNA
>>> seq = DNA(
... 'ACGT',
... positional_metadata={'quality': [3, 3, 20, 11],
... 'exons': [True, True, False, True]})
>>> seq
DNA
-----------------------------
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
-----------------------------
0 ACGT
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 20
3 True 11
Update positional metadata:
>>> seq.positional_metadata['gaps'] = seq.gaps()
>>> seq.positional_metadata
exons quality gaps
0 True 3 False
1 True 3 False
2 False 20 False
3 True 11 False
Set positional metadata:
>>> seq.positional_metadata = {'degenerates': seq.degenerates()}
>>> seq.positional_metadata
degenerates
0 False
1 False
2 False
3 False
Delete positional metadata:
>>> seq.has_positional_metadata()
True
>>> del seq.positional_metadata
>>> seq.positional_metadata
Empty DataFrame
Columns: []
Index: [0, 1, 2, 3]
>>> seq.has_positional_metadata()
False
"""
if self._positional_metadata is None:
# not using setter to avoid copy
self._positional_metadata = pd.DataFrame(
index=np.arange(len(self)))
return self._positional_metadata
@positional_metadata.setter
def positional_metadata(self, positional_metadata):
try:
# copy=True to copy underlying data buffer
positional_metadata = pd.DataFrame(positional_metadata, copy=True)
except pd.core.common.PandasError as e:
raise TypeError('Positional metadata invalid. Must be consumable '
'by pd.DataFrame. Original pandas error message: '
'"%s"' % e)
num_rows = len(positional_metadata.index)
if num_rows != len(self):
raise ValueError(
"Number of positional metadata values (%d) must match the "
"number of characters in the sequence (%d)." %
(num_rows, len(self)))
positional_metadata.reset_index(drop=True, inplace=True)
self._positional_metadata = positional_metadata
@positional_metadata.deleter
def positional_metadata(self):
self._positional_metadata = None
@property
def _string(self):
return self._bytes.tostring()
@stable(as_of="0.4.0")
def __init__(self, sequence, metadata=None, positional_metadata=None,
lowercase=False):
if isinstance(sequence, np.ndarray):
if sequence.dtype == np.uint8:
self._set_bytes_contiguous(sequence)
elif sequence.dtype == '|S1':
sequence = sequence.view(np.uint8)
# Guarantee the sequence is an array (might be scalar before
# this).
if sequence.shape == ():
sequence = np.array([sequence], dtype=np.uint8)
self._set_bytes_contiguous(sequence)
else:
raise TypeError(
"Can only create sequence from numpy.ndarray of dtype "
"np.uint8 or '|S1'. Invalid dtype: %s" %
sequence.dtype)
elif isinstance(sequence, Sequence):
# we're not simply accessing sequence.metadata in order to avoid
# creating "empty" metadata representations on both sequence
# objects if they don't have metadata. same strategy is used below
# for positional metadata
if metadata is None and sequence.has_metadata():
metadata = sequence.metadata
if (positional_metadata is None and
sequence.has_positional_metadata()):
positional_metadata = sequence.positional_metadata
sequence = sequence._bytes
self._owns_bytes = False
self._set_bytes(sequence)
else:
# Python 3 will not raise a UnicodeEncodeError so we force it by
# encoding it as ascii
if isinstance(sequence, six.text_type):
sequence = sequence.encode("ascii")
s = np.fromstring(sequence, dtype=np.uint8)
# There are two possibilities (to our knowledge) at this point:
# Either the sequence we were given was something string-like,
# (else it would not have made it past fromstring), or it was a
# numpy scalar, and so our length must be 1.
if isinstance(sequence, np.generic) and len(s) != 1:
raise TypeError("Can cannot create a sequence with %r" %
type(sequence).__name__)
sequence = s
self._owns_bytes = True
self._set_bytes(sequence)
if metadata is None:
self._metadata = None
else:
self.metadata = metadata
if positional_metadata is None:
self._positional_metadata = None
else:
self.positional_metadata = positional_metadata
if lowercase is False:
pass
elif lowercase is True or isinstance(lowercase, six.string_types):
lowercase_mask = self._bytes > self._ascii_lowercase_boundary
self._convert_to_uppercase(lowercase_mask)
# If it isn't True, it must be a string_type
if not (lowercase is True):
self.positional_metadata[lowercase] = lowercase_mask
else:
raise TypeError("lowercase keyword argument expected a bool or "
"string, but got %s" % type(lowercase))
def _set_bytes_contiguous(self, sequence):
"""Munge the sequence data into a numpy array of dtype uint8."""
if not sequence.flags['C_CONTIGUOUS']:
# numpy doesn't support views of non-contiguous arrays. Since we're
# making heavy use of views internally, and users may also supply
# us with a view, make sure we *always* store a contiguous array to
# avoid hard-to-track bugs. See
# https://github.com/numpy/numpy/issues/5716
sequence = np.ascontiguousarray(sequence)
self._owns_bytes = True
else:
self._owns_bytes = False
self._set_bytes(sequence)
def _set_bytes(self, sequence):
sequence.flags.writeable = False
self._bytes = sequence
def _convert_to_uppercase(self, lowercase):
if np.any(lowercase):
with self._byte_ownership():
self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
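# A minimal sketch of the case-flip trick used above, assuming
# _ascii_invert_case_bit_offset is the ASCII case bit (0x20): XOR-ing that
# bit toggles ASCII letters between upper- and lowercase in place, e.g.
#
#   >>> import numpy as np
#   >>> b = np.fromstring(b'aCgT', dtype=np.uint8)
#   >>> (b ^ np.uint8(0x20)).tostring()
#   b'AcGt'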
@stable(as_of="0.4.0")
def __contains__(self, subsequence):
"""Determine if a subsequence is contained in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
The putative subsequence.
Returns
-------
bool
Indicates whether `subsequence` is contained in the biological
sequence.
Raises
------
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
>>> 'GGU' in s
True
>>> 'CCC' in s
False
"""
return self._munge_to_bytestring(subsequence, "in") in self._string
@stable(as_of="0.4.0")
def __eq__(self, other):
"""Determine if the biological sequence is equal to another.
Biological sequences are equal if they are *exactly* the same type and
their sequence characters, metadata, and positional metadata are the
same.
Parameters
----------
other : Sequence
Sequence to test for equality against.
Returns
-------
bool
Indicates whether the biological sequence is equal to `other`.
Examples
--------
Define two biological sequences that have the same underlying sequence
of characters:
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
The two sequences are considered equal because they are the same type,
their underlying sequence of characters are the same, and their
optional metadata attributes (``metadata`` and ``positional_metadata``)
were not provided:
>>> s == t
True
>>> t == s
True
Define another biological sequence with a different sequence of
characters than the previous two biological sequences:
>>> u = Sequence('ACGA')
>>> u == t
False
Define a biological sequence with the same sequence of characters as
``u`` but with different metadata and positional metadata:
>>> v = Sequence('ACGA', metadata={'id': 'abc'},
... positional_metadata={'quality':[1, 5, 3, 3]})
The two sequences are not considered equal because their metadata and
positional metadata do not match:
>>> u == v
False
"""
# checks ordered from least to most expensive
if self.__class__ != other.__class__:
return False
# we're not simply comparing self.metadata to other.metadata in order
# to avoid creating "empty" metadata representations on the sequence
# objects if they don't have metadata. same strategy is used below for
# positional metadata
if self.has_metadata() and other.has_metadata():
if self.metadata != other.metadata:
return False
elif not (self.has_metadata() or other.has_metadata()):
# both don't have metadata
pass
else:
# one has metadata while the other does not
return False
if self._string != other._string:
return False
if self.has_positional_metadata() and other.has_positional_metadata():
if not self.positional_metadata.equals(other.positional_metadata):
return False
elif not (self.has_positional_metadata() or
other.has_positional_metadata()):
# both don't have positional metadata
pass
else:
# one has positional metadata while the other does not
return False
return True
@stable(as_of="0.4.0")
def __ne__(self, other):
"""Determine if the biological sequence is not equal to another.
Biological sequences are not equal if they are not *exactly* the same
type, or their sequence characters, metadata, or positional metadata
differ.
Parameters
----------
other : Sequence
Sequence to test for inequality against.
Returns
-------
bool
Indicates whether the biological sequence is not equal to `other`.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
>>> s != t
False
>>> u = Sequence('ACGA')
>>> u != t
True
>>> v = Sequence('ACGA', metadata={'id': 'v'})
>>> u != v
True
"""
return not (self == other)
@stable(as_of="0.4.0")
def __getitem__(self, indexable):
"""Slice the biological sequence.
Parameters
----------
indexable : int, slice, iterable (int and slice), 1D array_like (bool)
The position(s) to return from the biological sequence. If
`indexable` is an iterable of integers, these are assumed to be
indices in the sequence to keep. If `indexable` is a 1D
``array_like`` of booleans, these are assumed to be the positions
in the sequence to keep.
Returns
-------
Sequence
New biological sequence containing the position(s) specified by
`indexable` in the current biological sequence. If quality scores
are present, they will be sliced in the same manner and included in
the returned biological sequence. ID and description are also
included.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
Obtain a single character from the biological sequence:
>>> s[1]
Sequence
-------------
Stats:
length: 1
-------------
0 G
Obtain a slice:
>>> s[7:]
Sequence
-------------
Stats:
length: 5
-------------
0 AAGGA
Obtain characters at the following indices:
>>> s[[3, 4, 7, 0, 3]]
Sequence
-------------
Stats:
length: 5
-------------
0 CGAGC
Obtain characters at positions evaluating to `True`:
>>> s = Sequence('GGUCG')
>>> index = [True, False, True, True, False]
>>> s[index]
Sequence
-------------
Stats:
length: 3
-------------
0 GUC
"""
if (not isinstance(indexable, np.ndarray) and
((not isinstance(indexable, six.string_types)) and
hasattr(indexable, '__iter__'))):
indexable_ = indexable
indexable = np.asarray(indexable)
if indexable.dtype == object:
indexable = list(indexable_) # TODO: Don't blow out memory
if len(indexable) == 0:
# indexing with an empty list, so convert to ndarray and
# fall through to ndarray slicing below
indexable = np.asarray(indexable)
else:
seq = np.concatenate(
list(_slices_from_iter(self._bytes, indexable)))
index = _as_slice_if_single_index(indexable)
positional_metadata = None
if self.has_positional_metadata():
pos_md_slices = list(_slices_from_iter(
self.positional_metadata, index))
positional_metadata = pd.concat(pos_md_slices)
return self._to(sequence=seq,
positional_metadata=positional_metadata)
elif (isinstance(indexable, six.string_types) or
isinstance(indexable, bool)):
raise IndexError("Cannot index with %s type: %r" %
(type(indexable).__name__, indexable))
if (isinstance(indexable, np.ndarray) and
indexable.dtype == bool and
len(indexable) != len(self)):
raise IndexError("An boolean vector index must be the same length"
" as the sequence (%d, not %d)." %
(len(self), len(indexable)))
if isinstance(indexable, np.ndarray) and indexable.size == 0:
# convert an empty ndarray to a supported dtype for slicing a numpy
# array
indexable = indexable.astype(int)
seq = self._bytes[indexable]
positional_metadata = self._slice_positional_metadata(indexable)
return self._to(sequence=seq, positional_metadata=positional_metadata)
def _slice_positional_metadata(self, indexable):
if self.has_positional_metadata():
if _is_single_index(indexable):
index = _single_index_to_slice(indexable)
else:
index = indexable
return self.positional_metadata.iloc[index]
else:
return None
@stable(as_of="0.4.0")
def __len__(self):
"""Return the number of characters in the biological sequence.
Returns
-------
int
The length of the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> len(s)
4
"""
return self._bytes.size
@stable(as_of="0.4.0")
def __nonzero__(self):
"""Returns truth value (truthiness) of sequence.
Returns
-------
bool
True if length of sequence is greater than 0, else False.
Examples
--------
>>> from skbio import Sequence
>>> bool(Sequence(''))
False
>>> bool(Sequence('ACGT'))
True
"""
return len(self) > 0
@stable(as_of="0.4.0")
def __iter__(self):
"""Iterate over positions in the biological sequence.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in s:
... str(c)
'G'
'G'
'U'
'C'
"""
for i in range(len(self)):
yield self[i]
@stable(as_of="0.4.0")
def __reversed__(self):
"""Iterate over positions in the biological sequence in reverse order.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in reversed(s):
... str(c)
'C'
'U'
'G'
'G'
"""
return iter(self[::-1])
@stable(as_of="0.4.0")
def __str__(self):
"""Return biological sequence characters as a string.
Returns
-------
str
Sequence characters as a string. No metadata or positional
metadata will be included.
See Also
--------
sequence
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUAAAGGA', metadata={'id':'hello'})
>>> str(s)
'GGUCGUAAAGGA'
"""
return str(self._string.decode("ascii"))
@stable(as_of="0.4.0")
def __repr__(self):
r"""Return a string representation of the biological sequence object.
Representation includes:
* sequence type
* metadata keys and values: will display key/value if it is an
understood type, otherwise just the type will be displayed. If it is
an understood type whose representation is too long, just the type
will be displayed
* positional metadata: column names and column dtypes will be displayed
in the order they appear in the positional metadata ``pd.DataFrame``.
Column names (i.e., keys) follow the same display rules as metadata
keys
* sequence stats (e.g., length)
* up to five lines of chunked sequence data. Each line of chunked
sequence data displays the current position in the sequence
Returns
-------
str
String representation of the biological sequence object.
Notes
-----
Subclasses can override Sequence._repr_stats to provide custom
statistics.
Examples
--------
Short sequence without metadata:
>>> from skbio import Sequence
>>> Sequence('ACGTAATGGATACGTAATGCA')
Sequence
-------------------------
Stats:
length: 21
-------------------------
0 ACGTAATGGA TACGTAATGC A
Longer sequence displays first two lines and last two lines:
>>> Sequence('ACGT' * 100)
Sequence
---------------------------------------------------------------------
Stats:
length: 400
---------------------------------------------------------------------
0 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
60 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
...
300 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
360 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
Sequence with metadata and positional metadata:
>>> metadata = {
... 'id': 'seq-id',
... 'description': 'description of the sequence, wrapping across '
... 'lines if it\'s too long',
... 'authors': ['Alice', 'Bob', 'Carol'],
... 'year': 2015,
... 'published': True
... }
>>> positional_metadata = {
... 'quality': [3, 10, 11, 10],
... 'exons': [True, True, False, True]
... }
>>> Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
Sequence
----------------------------------------------------------------------
Metadata:
'authors': <class 'list'>
'description': "description of the sequence, wrapping across lines
if it's too long"
'id': 'seq-id'
'published': True
'year': 2015
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
----------------------------------------------------------------------
0 ACGT
"""
return _SequenceReprBuilder(
seq=self,
width=71, # 79 for pep8, 8 space indent for docstrings
indent=4,
chunk_size=10).build()
def _repr_stats(self):
"""Define statistics to display in the sequence's repr.
Subclasses can override this method to provide type-specific
statistics.
This method computes a single statistic: length.
Returns
-------
list
List of tuples where each tuple represents a statistic. Each tuple
contains exactly two ``str`` elements: the statistic's name/label,
and the str-formatted value of the statistic. Ordering of
statistics (i.e., list order) determines display order in the
sequence repr.
"""
return [('length', '%d' % len(self))]
@stable(as_of="0.4.0")
def __copy__(self):
"""Return a shallow copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=False)``.
"""
return self.copy(deep=False)
@stable(as_of="0.4.0")
def __deepcopy__(self, memo):
"""Return a deep copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=True)``.
"""
return self._copy(True, memo)
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the sequence contains metadata.
Returns
-------
bool
Indicates whether the sequence has metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_metadata()
False
>>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
>>> t.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the sequence contains positional metadata.
Returns
-------
bool
Indicates whether the sequence has positional metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_positional_metadata()
False
>>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
>>> t.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
@stable(as_of="0.4.0")
def copy(self, deep=False):
"""Return a copy of the biological sequence.
Parameters
----------
deep : bool, optional
Perform a deep copy. If ``False``, perform a shallow copy.
Returns
-------
Sequence
Copy of the biological sequence.
Notes
-----
Since sequence objects can share the same underlying immutable sequence
data (or pieces of it), this method can be used to create a sequence
object with its own copy of the sequence data so that the original
sequence data can be garbage-collected.
Examples
--------
Create a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT',
... metadata={'id': 'seq-id', 'authors': ['Alice']},
... positional_metadata={'quality': [7, 10, 8, 5],
... 'list': [[], [], [], []]})
Make a shallow copy of the sequence:
>>> seq_copy = seq.copy()
>>> seq_copy == seq
True
Setting new references in the copied sequence's metadata doesn't affect
the original sequence's metadata:
>>> seq_copy.metadata['id'] = 'new-id'
>>> pprint(seq_copy.metadata)
{'authors': ['Alice'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'quality'] = 999
>>> seq_copy.positional_metadata
list quality
0 [] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [] 7
1 [] 10
2 [] 8
3 [] 5
Since only a *shallow* copy was made, updates to mutable objects stored
as metadata affect the original sequence's metadata:
>>> seq_copy.metadata['authors'].append('Bob')
>>> pprint(seq_copy.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'list'].append(1)
>>> seq_copy.positional_metadata
list quality
0 [1] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
Perform a deep copy to avoid this behavior:
>>> seq_deep_copy = seq.copy(deep=True)
Updates to mutable objects no longer affect the original sequence's
metadata:
>>> seq_deep_copy.metadata['authors'].append('Carol')
>>> pprint(seq_deep_copy.metadata)
{'authors': ['Alice', 'Bob', 'Carol'], 'id': 'seq-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
Nor its positional metadata:
>>> seq_deep_copy.positional_metadata.loc[0, 'list'].append(2)
>>> seq_deep_copy.positional_metadata
list quality
0 [1, 2] 7
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
"""
return self._copy(deep, {})
def _copy(self, deep, memo):
# strategy: copy the sequence without metadata first, then set metadata
# attributes with copies. we take this approach instead of simply
# passing the metadata through the Sequence constructor because we
# don't want to copy twice (this could happen when deep=True, where we
# deep copy here and then shallow copy in the Sequence constructor). we
# also directly set the private metadata attributes instead of using
# their public setters to avoid an unnecessary copy
# we don't make a distinction between deep vs. shallow copy of bytes
# because dtype=np.uint8. we only need to make the distinction when
# dealing with object dtype
bytes = np.copy(self._bytes)
seq_copy = self._constructor(sequence=bytes, metadata=None,
positional_metadata=None)
if self.has_metadata():
metadata = self.metadata
if deep:
metadata = copy.deepcopy(metadata, memo)
else:
metadata = metadata.copy()
seq_copy._metadata = metadata
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
if deep:
positional_metadata = copy.deepcopy(positional_metadata, memo)
else:
# pandas' deep=True copies the underlying data buffer, but not
# the objects it contains, so this remains a shallow copy
positional_metadata = positional_metadata.copy(deep=True)
seq_copy._positional_metadata = positional_metadata
return seq_copy
@stable(as_of='0.4.0')
def lowercase(self, lowercase):
"""Return a case-sensitive string representation of the sequence.
Parameters
----------
lowercase: str or boolean vector
If lowercase is a boolean vector, it is used to set sequence
characters to lowercase in the output string. True values in the
boolean vector correspond to lowercase characters. If lowercase
is a str, it is treated like a key into the positional metadata,
pointing to a column which must be a boolean vector.
That boolean vector is then used as described previously.
Returns
-------
str
String representation of sequence with specified characters set to
lowercase.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> s.lowercase([True, True, False, False])
'acGT'
>>> s = Sequence('ACGT',
... positional_metadata={
... 'exons': [True, False, False, True]})
>>> s.lowercase('exons')
'aCGt'
Constructor automatically populates a column in positional metadata
when the ``lowercase`` keyword argument is provided with a column name:
>>> s = Sequence('ACgt', lowercase='introns')
>>> s.lowercase('introns')
'ACgt'
>>> s = Sequence('ACGT', lowercase='introns')
>>> s.lowercase('introns')
'ACGT'
"""
index = self._munge_to_index_array(lowercase)
outbytes = self._bytes.copy()
outbytes[index] ^= self._ascii_invert_case_bit_offset
return str(outbytes.tostring().decode('ascii'))
@stable(as_of="0.4.0")
def count(self, subsequence, start=None, end=None):
"""Count occurrences of a subsequence in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to count occurrences of.
start : int, optional
The position at which to start counting (inclusive).
end : int, optional
The position at which to stop counting (exclusive).
Returns
-------
int
Number of occurrences of `subsequence` in the biological sequence.
Raises
------
ValueError
If `subsequence` is of length 0.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCG')
>>> s.count('G')
3
>>> s.count('GG')
1
>>> s.count('T')
0
>>> s.count('G', 2, 5)
1
"""
if len(subsequence) == 0:
raise ValueError("`count` is not defined for empty subsequences.")
return self._string.count(
self._munge_to_bytestring(subsequence, "count"), start, end)
@stable(as_of="0.4.0")
def index(self, subsequence, start=None, end=None):
"""Find position where subsequence first occurs in the sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to search for in the biological sequence.
start : int, optional
The position at which to start searching (inclusive).
end : int, optional
The position at which to stop searching (exclusive).
Returns
-------
int
Position where `subsequence` first occurs in the biological
sequence.
Raises
------
ValueError
If `subsequence` is not present in the biological sequence.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT-')
>>> s.index('ACG')
2
"""
try:
return self._string.index(
self._munge_to_bytestring(subsequence, "index"), start, end)
except ValueError:
raise ValueError(
"%r is not present in %r." % (subsequence, self))
@experimental(as_of="0.4.0")
def distance(self, other, metric=None):
"""Compute the distance to another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compute the distance to.
metric : function, optional
Function used to compute the distance between the biological
sequence and `other`. If ``None`` (the default),
``scipy.spatial.distance.hamming`` will be used. This function
should take two ``skbio.Sequence`` objects and return a ``float``.
Returns
-------
float
Distance between the biological sequence and `other`.
Raises
------
ValueError
If the sequences are not the same length when `metric` is ``None``
(i.e., `metric` is ``scipy.spatial.distance.hamming``). This is
only checked when using this metric, as equal length is not a
requirement of all sequence distance metrics. In general, the
metric itself should test and give an informative error message,
but the message from ``scipy.spatial.distance.hamming`` is somewhat
cryptic (as of this writing), and it's the default metric, so we
explicitly do this check here. This metric-specific check will be
removed from this method when the ``skbio.sequence.stats`` module
is created (track progress on issue #913).
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
fraction_diff
fraction_same
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.distance(t)
0.25
>>> def custom_dist(s1, s2): return 0.42
>>> s.distance(t, custom_dist)
0.42
"""
# TODO refactor this method to accept a name (string) of the distance
# metric to apply and accept **kwargs
other = self._munge_to_sequence(other, 'distance')
if metric is None:
return self._hamming(other)
return float(metric(self, other))
def _hamming(self, other):
# Hamming requires equal length sequences. We are checking this
# here because the error you would get otherwise is cryptic.
if len(self) != len(other):
raise ValueError(
"Sequences do not have equal length. "
"Hamming distances can only be computed between "
"sequences of equal length.")
return float(hamming(self.values, other.values))
@stable(as_of="0.4.0")
def matches(self, other):
"""Find positions that match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a match
between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.matches(t)
array([ True, False, True, False], dtype=bool)
"""
other = self._munge_to_sequence(other, 'matches/mismatches')
if len(self) != len(other):
raise ValueError("Match and mismatch vectors can only be "
"generated from equal length sequences.")
return self._bytes == other._bytes
@stable(as_of="0.4.0")
def mismatches(self, other):
"""Find positions that do not match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a
mismatch between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
matches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.mismatches(t)
array([False, True, False, True], dtype=bool)
"""
return np.invert(self.matches(other))
@stable(as_of="0.4.0")
def match_frequency(self, other, relative=False):
"""Return count of positions that are the same between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of matches instead of
the count.
Returns
-------
int or float
Number of positions that are the same between the sequences. This
will be an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatch_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.match_frequency(t)
3
>>> s.match_frequency(t, relative=True)
0.75
"""
if relative:
return float(self.matches(other).mean())
else:
return int(self.matches(other).sum())
@stable(as_of="0.4.0")
def mismatch_frequency(self, other, relative=False):
"""Return count of positions that differ between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of mismatches instead of
the count.
Returns
-------
int or float
Number of positions that differ between the sequences. This will be
an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
match_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.mismatch_frequency(t)
1
>>> s.mismatch_frequency(t, relative=True)
0.25
"""
if relative:
return float(self.mismatches(other).mean())
else:
return int(self.mismatches(other).sum())
@stable(as_of="0.4.0")
def iter_kmers(self, k, overlap=True):
"""Generate kmers of length `k` from the biological sequence.
Parameters
----------
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Yields
------
Sequence
kmer of length `k` contained in the biological sequence.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT')
>>> for kmer in s.iter_kmers(4, overlap=False):
... str(kmer)
'ACAC'
'GACG'
>>> for kmer in s.iter_kmers(3, overlap=True):
... str(kmer)
'ACA'
'CAC'
'ACG'
'CGA'
'GAC'
'ACG'
'CGT'
'GTT'
"""
if k < 1:
raise ValueError("k must be greater than 0.")
if overlap:
step = 1
count = len(self) - k + 1
else:
step = k
count = len(self) // k
if self.has_positional_metadata():
for i in range(0, len(self) - k + 1, step):
yield self[i:i+k]
else:
# Optimized path when there is no positional metadata to slice
kmers = np.lib.stride_tricks.as_strided(
self._bytes, shape=(k, count), strides=(1, step)).T
for s in kmers:
yield self._to(sequence=s)
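# A minimal sketch of the stride trick above: shape (k, count) with
# strides (1, step) makes column j begin at byte j*step, so the transpose
# yields one kmer window per row without copying, e.g. for k=3, step=1
# over a length-4 buffer:
#
#   >>> import numpy as np
#   >>> b = np.fromstring(b'ACGT', dtype=np.uint8)
#   >>> np.lib.stride_tricks.as_strided(b, shape=(3, 2), strides=(1, 1)).T
#   ... rows are the windows b[0:3] ('ACG') and b[1:4] ('CGT')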
@stable(as_of="0.4.0")
def kmer_frequencies(self, k, overlap=True, relative=False):
"""Return counts of words of length `k` from the biological sequence.
Parameters
----------
k : int
The word length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
relative : bool, optional
If ``True``, return the relative frequency of each kmer instead of
its count.
Returns
-------
collections.Counter or collections.defaultdict
Frequencies of words of length `k` contained in the biological
sequence. This will be a ``collections.Counter`` if `relative` is
``False`` and a ``collections.defaultdict`` if `relative` is
``True``.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from collections import defaultdict, Counter
>>> from skbio import Sequence
>>> s = Sequence('ACACATTTATTA')
>>> freqs = s.kmer_frequencies(3, overlap=False)
>>> freqs == Counter({'TTA': 2, 'ACA': 1, 'CAT': 1})
True
>>> freqs = s.kmer_frequencies(3, relative=True, overlap=False)
>>> freqs == defaultdict(float, {'ACA': 0.25, 'TTA': 0.5, 'CAT': 0.25})
True
"""
kmers = self.iter_kmers(k, overlap=overlap)
freqs = collections.Counter((str(seq) for seq in kmers))
if relative:
if overlap:
num_kmers = len(self) - k + 1
else:
num_kmers = len(self) // k
relative_freqs = collections.defaultdict(float)
for kmer, count in viewitems(freqs):
relative_freqs[kmer] = count / num_kmers
freqs = relative_freqs
return freqs
@stable(as_of="0.4.0")
def find_with_regex(self, regex, ignore=None):
"""Generate slices for patterns matched by a regular expression.
Parameters
----------
regex : str or regular expression object
String to be compiled into a regular expression, or a pre-
compiled regular expression object (e.g., from calling
``re.compile``).
ignore : 1D array_like (bool) or iterable (slices or ints), optional
Indicate the positions to ignore when matching.
Yields
------
slice
Location where the regular expression matched.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AATATACCGGTTATAA')
>>> for match in s.find_with_regex('(TATA+)'):
... match
... str(s[match])
slice(2, 6, None)
'TATA'
slice(11, 16, None)
'TATAA'
"""
if isinstance(regex, six.string_types):
regex = re.compile(regex)
lookup = np.arange(len(self))
if ignore is None:
string = str(self)
else:
ignore = self._munge_to_index_array(ignore)
lookup = np.delete(lookup, ignore)
string = str(self[lookup])
for match in regex.finditer(string):
# We start at 1 because we don't want the group that contains all
# other groups.
for g in range(1, len(match.groups())+1):
yield slice(lookup[match.start(g)],
lookup[match.end(g) - 1] + 1)
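# A minimal sketch of the ignore/lookup mapping above: ignored positions
# are deleted from an identity lookup, so a match found in the reduced
# string maps back to original coordinates through `lookup`, e.g.
#
#   >>> import numpy as np
#   >>> np.delete(np.arange(6), [1, 2])  # ignore positions 1 and 2
#   array([0, 3, 4, 5])
#
# A match starting at reduced position 1 therefore starts at original
# position lookup[1] == 3.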
@stable(as_of="0.4.0")
def iter_contiguous(self, included, min_length=1, invert=False):
"""Yield contiguous subsequences based on `included`.
Parameters
----------
included : 1D array_like (bool) or iterable (slices or ints)
`included` is transformed into a flat boolean vector where each
position will either be included or skipped. All contiguous
included positions will be yielded as a single region.
min_length : int, optional
The minimum length of a subsequence for it to be yielded.
Default is 1.
invert : bool, optional
Whether to invert `included` such that it describes what should be
skipped instead of included. Default is False.
Yields
------
Sequence
Contiguous subsequence as indicated by `included`.
Notes
-----
If slices provide adjacent ranges, then they will be considered the
same contiguous subsequence.
Examples
--------
Here we use `iter_contiguous` to find all of the contiguous ungapped
sequences using a boolean vector derived from our DNA sequence.
>>> from skbio import DNA
>>> s = DNA('AAA--TT-CCCC-G-')
>>> no_gaps = ~s.gaps()
>>> for ungapped_subsequence in s.iter_contiguous(no_gaps,
... min_length=2):
... print(ungapped_subsequence)
AAA
TT
CCCC
Note how the last potential subsequence was skipped because it would
have been smaller than our `min_length` which was set to 2.
We can also use `iter_contiguous` on a generator of slices as is
produced by `find_motifs` (and `find_with_regex`).
>>> from skbio import Protein
>>> s = Protein('ACDFNASANFTACGNPNRTESL')
>>> for subseq in s.iter_contiguous(s.find_motifs('N-glycosylation')):
... print(subseq)
NASANFTA
NRTE
Note how the first subsequence contains two N-glycosylation sites. This
happened because they were contiguous.
"""
idx = self._munge_to_index_array(included)
if invert:
idx = np.delete(np.arange(len(self)), idx)
# Adapted from http://stackoverflow.com/a/7353335/579416
for contig in np.split(idx, np.where(np.diff(idx) != 1)[0] + 1):
r = self[contig]
if len(r) >= min_length:
yield r
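# A minimal sketch of the contiguous-run split used above: np.diff marks
# where consecutive indices jump by more than one, and np.split cuts just
# after each jump, e.g.
#
#   >>> import numpy as np
#   >>> idx = np.array([0, 1, 2, 5, 6, 9])
#   >>> np.split(idx, np.where(np.diff(idx) != 1)[0] + 1)
#   [array([0, 1, 2]), array([5, 6]), array([9])]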
def _to(self, sequence=None, metadata=None, positional_metadata=None):
"""Return a copy of the current biological sequence.
Returns a copy of the current biological sequence, optionally with
updated attributes specified as keyword arguments.
Arguments are the same as those passed to the ``Sequence`` constructor.
The returned copy will have its attributes updated based on the
arguments. If an attribute is missing, the copy will keep the same
attribute as the current biological sequence. Valid attribute names
are `'sequence'`, `'metadata'`, and `'positional_metadata'`. Default
behavior is to return a copy of the current biological sequence
without changing any attributes.
Parameters
----------
sequence : optional
metadata : optional
positional_metadata : optional
Returns
-------
Sequence
Copy of the current biological sequence, optionally with updated
attributes based on arguments. Will be the same type as the current
biological sequence (`self`).
Notes
-----
By default, `metadata` and `positional_metadata` are shallow-copied and
the reference to `sequence` is used (without copying) for efficiency
since `sequence` is immutable. This differs from the behavior of
`Sequence.copy`, which will actually copy `sequence`.
This method is the preferred way of creating new instances from an
existing biological sequence, instead of calling
``self.__class__(...)``, as the latter can be error-prone (e.g.,
it's easy to forget to propagate attributes to the new instance).
"""
if sequence is None:
sequence = self._bytes
if metadata is None:
metadata = self._metadata
if positional_metadata is None:
positional_metadata = self._positional_metadata
return self._constructor(sequence=sequence, metadata=metadata,
positional_metadata=positional_metadata)
def _constructor(self, **kwargs):
return self.__class__(**kwargs)
def _munge_to_index_array(self, sliceable):
"""Return an index array from something isomorphic to a boolean vector.
"""
if isinstance(sliceable, six.string_types):
if sliceable in self.positional_metadata:
if self.positional_metadata[sliceable].dtype == np.bool:
sliceable = self.positional_metadata[sliceable]
else:
raise TypeError("Column '%s' in positional metadata does "
"not correspond to a boolean vector" %
sliceable)
else:
raise ValueError("No positional metadata associated with key "
"'%s'" % sliceable)
if not hasattr(sliceable, 'dtype') or sliceable.dtype == 'object':
sliceable = tuple(sliceable)
bool_mode = False
int_mode = False
for s in sliceable:
if isinstance(s, (bool, np.bool_)):
bool_mode = True
elif isinstance(s, (slice, int, np.signedinteger)) or (
hasattr(s, 'dtype') and s.dtype != np.bool):
int_mode = True
else:
raise TypeError("Invalid type in iterable: %s, must be one"
" of {bool, int, slice, np.signedinteger}"
% s.__class__.__name__)
if bool_mode and int_mode:
raise TypeError("Cannot provide iterable of both bool and"
" int.")
sliceable = np.r_[sliceable]
if sliceable.dtype == np.bool:
if sliceable.size != len(self):
raise ValueError("Boolean array (%d) does not match length of"
" sequence (%d)."
% (sliceable.size, len(self)))
normalized, = np.where(sliceable)
else:
normalized = np.bincount(sliceable)
if np.any(normalized > 1):
raise ValueError("Overlapping index regions are not allowed.")
normalized, = np.where(normalized)
if np.any(normalized != sliceable):
raise ValueError("Index regions are out of order.")
return normalized
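# A minimal sketch of the bincount normalisation above: counting how often
# each index occurs detects overlap (any count > 1) and recovers a sorted,
# de-duplicated index array via np.where, e.g.
#
#   >>> import numpy as np
#   >>> counts = np.bincount(np.r_[0:3, 5])  # indices 0, 1, 2, 5
#   >>> np.where(counts)[0]
#   array([0, 1, 2, 5])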
def _munge_to_sequence(self, other, method):
if isinstance(other, Sequence):
if type(other) != type(self):
raise TypeError("Cannot use %s and %s together with `%s`" %
(self.__class__.__name__,
other.__class__.__name__, method))
else:
return other
# We don't use self.__class__ or self._constructor here because we want
# to construct the most general type of Sequence object in order to
# avoid validation errors.
return Sequence(other)
def _munge_to_bytestring(self, other, method):
if type(other) is bytes:
return other
elif isinstance(other, six.string_types):
return other.encode('ascii')
else:
return self._munge_to_sequence(other, method)._string
@contextmanager
def _byte_ownership(self):
if not self._owns_bytes:
self._bytes = self._bytes.copy()
self._owns_bytes = True
self._bytes.flags.writeable = True
yield
self._bytes.flags.writeable = False
def _single_index_to_slice(start_index):
end_index = None if start_index == -1 else start_index+1
return slice(start_index, end_index)
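# A minimal sketch of the -1 edge case handled above: slice(-1, None)
# selects the final element, whereas the naive slice(-1, 0) would select
# nothing, e.g.
#
#   >>> 'ACGT'[slice(-1, None)]
#   'T'
#   >>> 'ACGT'[slice(-1, 0)]
#   ''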
def _is_single_index(index):
return (isinstance(index, numbers.Integral) and
not isinstance(index, bool))
def _as_slice_if_single_index(indexable):
if _is_single_index(indexable):
return _single_index_to_slice(indexable)
else:
return indexable
def _slices_from_iter(array, indexables):
for i in indexables:
if isinstance(i, slice):
pass
elif _is_single_index(i):
i = _single_index_to_slice(i)
else:
raise IndexError("Cannot slice sequence from iterable "
"containing %r." % i)
yield array[i]
class _SequenceReprBuilder(object):
"""Build a ``Sequence`` repr.
Parameters
----------
seq : Sequence
Sequence to repr.
width : int
Maximum width of the repr.
indent : int
Number of spaces to use for indented lines.
chunk_size: int
Number of characters in each chunk of a sequence.
"""
def __init__(self, seq, width, indent, chunk_size):
self._seq = seq
self._width = width
self._indent = ' ' * indent
self._chunk_size = chunk_size
def build(self):
lines = ElasticLines()
cls_name = self._seq.__class__.__name__
lines.add_line(cls_name)
lines.add_separator()
if self._seq.has_metadata():
lines.add_line('Metadata:')
# Python 3 doesn't allow sorting of mixed types so we can't just
# use sorted() on the metadata keys. Sort first by type then sort
# by value within each type.
for key in self._sorted_keys_grouped_by_type(self._seq.metadata):
value = self._seq.metadata[key]
lines.add_lines(self._format_metadata_key_value(key, value))
if self._seq.has_positional_metadata():
lines.add_line('Positional metadata:')
for key in self._seq.positional_metadata.columns.values.tolist():
dtype = self._seq.positional_metadata[key].dtype
lines.add_lines(
self._format_positional_metadata_column(key, dtype))
lines.add_line('Stats:')
for label, value in self._seq._repr_stats():
lines.add_line('%s%s: %s' % (self._indent, label, value))
lines.add_separator()
num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
# display entire sequence if we can, else display the first two and
# last two lines separated by ellipsis
if num_lines <= 5:
lines.add_lines(self._format_chunked_seq(
range(num_lines), num_chars, column_width))
else:
lines.add_lines(self._format_chunked_seq(
range(2), num_chars, column_width))
lines.add_line('...')
lines.add_lines(self._format_chunked_seq(
range(num_lines - 2, num_lines), num_chars, column_width))
return lines.to_str()
def _sorted_keys_grouped_by_type(self, dict_):
"""Group keys within a dict by their type and sort within type."""
type_sorted = sorted(dict_, key=self._type_sort_key)
type_and_value_sorted = []
for _, group in itertools.groupby(type_sorted, self._type_sort_key):
type_and_value_sorted.extend(sorted(group))
return type_and_value_sorted
def _type_sort_key(self, key):
return repr(type(key))
def _format_metadata_key_value(self, key, value):
"""Format metadata key:value, wrapping across lines if necessary."""
key_fmt = self._format_key(key)
supported_type = True
if isinstance(value, (six.text_type, six.binary_type)):
# for stringy values, there may be u'' or b'' depending on the type
# of `value` and version of Python. find the starting quote
# character so that wrapped text will line up with that instead of
# the string literal prefix character. for example:
#
# 'foo': u'abc def ghi
# jkl mno'
value_repr = repr(value)
extra_indent = 1
if not (value_repr.startswith("'") or value_repr.startswith('"')):
extra_indent = 2
# handles any number, this includes bool
elif value is None or isinstance(value, numbers.Number):
value_repr = repr(value)
extra_indent = 0
else:
supported_type = False
if not supported_type or len(value_repr) > 140:
value_repr = str(type(value))
# extra indent of 1 so that wrapped text lines up past the bracket:
#
# 'foo': <type
# 'dict'>
extra_indent = 1
return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
def _format_key(self, key):
"""Format metadata key.
Includes initial indent and trailing colon and space:
<indent>'foo':<space>
"""
key_fmt = self._indent + repr(key)
supported_types = (six.text_type, six.binary_type, numbers.Number,
type(None))
if len(key_fmt) > (self._width / 2) or not isinstance(key,
supported_types):
key_fmt = self._indent + str(type(key))
return '%s: ' % key_fmt
def _wrap_text_with_indent(self, text, initial_text, extra_indent):
"""Wrap text across lines with an initial indentation.
For example:
'foo': 'abc def
ghi jkl
mno pqr'
<indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
lines are indented such that they line up with the start of the
previous line of wrapped text.
"""
return textwrap.wrap(
text, width=self._width, expand_tabs=False,
initial_indent=initial_text,
subsequent_indent=' ' * (len(initial_text) + extra_indent))
def _format_positional_metadata_column(self, key, dtype):
key_fmt = self._format_key(key)
dtype_fmt = '<dtype: %s>' % str(dtype)
return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
def _find_optimal_seq_chunking(self):
"""Find the optimal number of sequence chunks to fit on a single line.
Returns the number of lines the sequence will occupy, the number of
sequence characters displayed on each line, and the column width
necessary to display position info using the optimal number of sequence
chunks.
"""
# strategy: use an iterative approach to find the optimal number of
# sequence chunks per line. start with a single chunk and increase
# until the max line width is exceeded. when this happens, the previous
# number of chunks is optimal
num_lines = 0
num_chars = 0
column_width = 0
num_chunks = 1
not_exceeded = True
while not_exceeded:
line_len, new_chunk_info = self._compute_chunked_seq_line_len(
num_chunks)
not_exceeded = line_len <= self._width
if not_exceeded:
num_lines, num_chars, column_width = new_chunk_info
num_chunks += 1
return num_lines, num_chars, column_width
def _compute_chunked_seq_line_len(self, num_chunks):
"""Compute line length based on a number of chunks."""
num_chars = num_chunks * self._chunk_size
# ceil to account for partial line
num_lines = int(math.ceil(len(self._seq) / num_chars))
# position column width is fixed width, based on the number of
# characters necessary to display the position of the final line (all
# previous positions will be left justified using this width)
column_width = len('%d ' % ((num_lines - 1) * num_chars))
# column width + number of sequence characters + spaces between chunks
line_len = column_width + num_chars + (num_chunks - 1)
return line_len, (num_lines, num_chars, column_width)
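# Worked example of the arithmetic above with chunk_size=10: for a
# 400-character sequence and num_chunks=6, num_chars=60 and
# num_lines=ceil(400/60)=7. The final line starts at position 360, so
# column_width=len('360 ')=4 and line_len=4+60+5=69, which fits within the
# repr width of 71 used by Sequence.__repr__.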
def _format_chunked_seq(self, line_idxs, num_chars, column_width):
"""Format specified lines of chunked sequence data."""
lines = []
for line_idx in line_idxs:
seq_idx = line_idx * num_chars
chars = str(self._seq[seq_idx:seq_idx+num_chars])
chunked_chars = chunk_str(chars, self._chunk_size, ' ')
lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
return lines
| SamStudio8/scikit-bio | skbio/sequence/_sequence.py | Python | bsd-3-clause | 78197 | ["scikit-bio"] | aee386de63e7fe9f7e3635462eef48e6c494d0b2eeaab1b87662c6337ba6cd92 |
"""The data models used in ARL:
.. note::
There are two visibility formats:
:class:`BlockVisibility` is conceived as an ingest and calibration format. The visibility
data are kept in a block of shape (number of antennas, number of antennas, number of channels,
number of polarisations). One block is kept per integration. The other columns are time and uvw.
The sampling in time is therefore the same for all baselines.
:class:`Visibility` is designed to hold coalesced data where the integration time and
channel width can vary with baseline length. The visibility data are kept in a visibility
vector of length equal to the number of polarisations. Everything else is a separate
column: time, frequency, uvw, channel_bandwidth, integration time.
"""
import sys
import logging
from copy import deepcopy
from typing import Union
import numpy
import warnings
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.wcs import FITSFixedWarning
warnings.simplefilter('ignore', FITSFixedWarning)
warnings.simplefilter('ignore', AstropyDeprecationWarning)
from data_models.polarisation import PolarisationFrame, ReceptorFrame
log = logging.getLogger(__name__)
class Configuration:
""" Describe a Configuration as locations in x,y,z, mount type, diameter, names, and
overall location
"""
def __init__(self, name='', data=None, location=None,
names="%s", xyz=None, mount="alt-az", frame="",
receptor_frame=ReceptorFrame("linear"),
diameter=None):
"""Configuration object describing data for processing
:param name:
:param data:
:param location:
:param names:
:param xyz:
:param mount:
:param frame:
:param receptor_frame:
:param diameter:
"""
if data is None and xyz is not None:
desc = [('names', 'U12'),
('xyz', 'f8', (3,)),
('diameter', 'f8'),
('mount', 'U5')]
nants = xyz.shape[0]
if isinstance(names, str):
names = [names % ant for ant in range(nants)]
if isinstance(mount, str):
mount = numpy.repeat(mount, nants)
data = numpy.zeros(shape=[nants], dtype=desc)
data['names'] = names
data['xyz'] = xyz
data['mount'] = mount
data['diameter'] = diameter
self.name = name
self.data = data
self.location = location
self.frame = frame
self.receptor_frame = receptor_frame
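# A minimal usage sketch (hypothetical 3-station layout; station names are
# expanded from the printf-style template):
#
#   >>> import numpy
#   >>> config = Configuration(name='LOWBD2-CORE', xyz=numpy.zeros([3, 3]),
#   ...                        names='LOW_%d', mount='xy', diameter=35.0)
#   >>> list(config.names)
#   ['LOW_0', 'LOW_1', 'LOW_2']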
def __str__(self):
"""Default printer for Skycomponent
"""
s = "Configuration:\n"
s += "\nName: %s\n" % self.name
s += "\tNumber of antennas/stations: %s\n" % len(self.names)
s += "\tNames: %s\n" % self.names
s += "\tDiameter: %s\n" % self.diameter
s += "\tMount: %s\n" % self.mount
s += "\tXYZ: %s\n" % self.xyz
return s
def size(self):
""" Return size in GB
"""
# nbytes is the in-memory size of the structured array in bytes
size = self.data.nbytes
return size / 1024.0 / 1024.0 / 1024.0
@property
def names(self):
""" Names of the antennas/stations"""
return self.data['names']
@property
def diameter(self):
""" diameter of antennas/stations
"""
return self.data['diameter']
@property
def xyz(self):
""" XYZ locations of antennas/stations
"""
return self.data['xyz']
@property
def mount(self):
""" Mount type
"""
return self.data['mount']
class GainTable:
""" Gain table with data_models: time, antenna, gain[:, chan, rec, rec], weight columns
The weight is usually that output from gain solvers.
"""
def __init__(self, data=None, gain: numpy.array = None, time: numpy.array = None, interval=None,
weight: numpy.array = None, residual: numpy.array = None, frequency: numpy.array = None,
receptor_frame: ReceptorFrame = ReceptorFrame("linear"), phasecentre=None, configuration=None):
""" Create a gaintable from arrays
The definition of gain is:
Vobs = g_i g_j^* Vmodel
:param data:
:param gain: [:, nchan, nrec, nrec]
:param time: Centroid of solution
:param interval: Interval of validity
:param weight:
:param residual:
:param frequency:
:param receptor_frame:
:return: Gaintable
"""
if data is None and gain is not None:
nrec = receptor_frame.nrec
nrows = gain.shape[0]
nants = gain.shape[1]
nchan = gain.shape[2]
assert len(frequency) == nchan, "Discrepancy in frequency channels"
desc = [('gain', 'c16', (nants, nchan, nrec, nrec)),
('weight', 'f8', (nants, nchan, nrec, nrec)),
('residual', 'f8', (nchan, nrec, nrec)),
('time', 'f8'),
('interval', 'f8')]
data = numpy.zeros(shape=[nrows], dtype=desc)
data['gain'] = gain
data['weight'] = weight
data['time'] = time
data['interval'] = interval
data['residual'] = residual
self.data = data
self.frequency = frequency
self.receptor_frame = receptor_frame
self.phasecentre = phasecentre
self.configuration = configuration
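# A minimal sketch of applying the gain convention above to a single model
# visibility on baseline (i, j), channel chan, receptor pair (0, 0); the
# variable names here are illustrative only:
#
#   >>> vobs = gt.gain[row, i, chan, 0, 0] * \
#   ...     numpy.conjugate(gt.gain[row, j, chan, 0, 0]) * vmodel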
def size(self):
""" Return size in GB
"""
# nbytes is the in-memory size of the structured array in bytes
size = self.data.nbytes
return size / 1024.0 / 1024.0 / 1024.0
@property
def time(self):
return self.data['time']
@property
def interval(self):
return self.data['interval']
@property
def gain(self):
return self.data['gain']
@property
def weight(self):
return self.data['weight']
@property
def residual(self):
return self.data['residual']
@property
def ntimes(self):
return self.data['gain'].shape[0]
@property
def nants(self):
return self.data['gain'].shape[1]
@property
def nchan(self):
return self.data['gain'].shape[2]
@property
def nrec(self):
return self.receptor_frame.nrec
def __str__(self):
"""Default printer for GainTable
"""
s = "GainTable:\n"
s += "\tTimes: %s\n" % str(self.ntimes)
s += "\tData shape: %s\n" % str(self.data.shape)
s += "\tReceptor frame: %s\n" % str(self.receptor_frame.type)
s += "\tPhasecentre: %s\n" % str(self.phasecentre)
return s
class PointingTable:
""" Pointing table with data_models: time, antenna, offset[:, chan, rec, 2], weight columns
The weight is usually that output from gain solvers.
"""
def __init__(self, data=None, pointing: numpy.array = None, nominal: numpy.array = None,
time: numpy.array = None, interval=None,
weight: numpy.array = None, residual: numpy.array = None, frequency: numpy.array = None,
receptor_frame: ReceptorFrame = ReceptorFrame("linear"), pointing_frame: str = "local",
pointingcentre=None, configuration=None):
""" Create a pointing from arrays
:param data:
:param pointing: [:, nchan, nrec, 2]
:param nominal: [:, nchan, nrec, 2]
:param time: Centroid of solution
:param interval: Interval of validity
:param weight:
:param residual:
:param frequency:
:param receptor_frame:
:return: PointingTable
"""
if data is None and pointing is not None:
nrec = receptor_frame.nrec
nrows = pointing.shape[0]
nants = pointing.shape[1]
nchan = pointing.shape[2]
assert len(frequency) == nchan, "Discrepancy in frequency channels"
desc = [('pointing', 'f16', (nants, nchan, nrec, 2)),
('nominal', 'f16', (nants, nchan, nrec, 2)),
('weight', 'f8', (nants, nchan, nrec, 2)),
('residual', 'f8', (nchan, nrec, 2)),
('time', 'f8'),
('interval', 'f8')]
data = numpy.zeros(shape=[nrows], dtype=desc)
data['pointing'] = pointing
data['weight'] = weight
data['time'] = time
data['interval'] = interval
data['residual'] = residual
data['nominal'] = nominal
self.data = data
self.frequency = frequency
self.receptor_frame = receptor_frame
self.pointing_frame = pointing_frame
self.pointingcentre = pointingcentre
self.configuration = configuration
def size(self):
""" Return size in GB
"""
# nbytes is the in-memory size of the structured array in bytes
size = self.data.nbytes
return size / 1024.0 / 1024.0 / 1024.0
@property
def time(self):
return self.data['time']
@property
def interval(self):
return self.data['interval']
@property
def nominal(self):
return self.data['nominal']
@property
def pointing(self):
return self.data['pointing']
@property
def weight(self):
return self.data['weight']
@property
def residual(self):
return self.data['residual']
@property
def ntimes(self):
return self.data['pointing'].shape[0]
@property
def nants(self):
return self.data['pointing'].shape[1]
@property
def nchan(self):
return self.data['pointing'].shape[2]
@property
def nrec(self):
return self.receptor_frame.nrec
def __str__(self):
"""Default printer for GainTable
"""
s = "PointingTable:\n"
s += "\tTimes: %s\n" % str(self.ntimes)
s += "\tData shape: %s\n" % str(self.data.shape)
s += "\tReceptor frame: %s\n" % str(self.receptor_frame.type)
s += "\tPointing centre: %s\n" % str(self.pointingcentre)
s += "\tConfiguration: %s\n" % str(self.configuration)
return s
class Image:
"""Image class with Image data (as a numpy.array) and the AstroPy `implementation of
a World Coordinate System <http://docs.astropy.org/en/stable/wcs>`_
Many operations can be done conveniently using numpy functions on Image.data.
Most of the imaging functions require an image in canonical format:
- 4 axes: RA, DEC, POL, FREQ
The conventions for indexing in WCS and numpy are opposite.
- In astropy.wcs, the order is (longitude, latitude, polarisation, frequency)
- in numpy, the order is (frequency, polarisation, latitude, longitude)
.. warning::
The polarisation_frame is kept in two places, the WCS and the polarisation_frame
variable. The latter should be considered definitive.
"""
def __init__(self):
""" Empty image
"""
self.data = None
self.wcs = None
self.polarisation_frame = None
def size(self):
""" Return size in GB
"""
size = 0
size += self.data.nbytes
return size / 1024.0 / 1024.0 / 1024.0
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# noinspection PyArgumentList
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
@property
def nchan(self):
return self.data.shape[0]
@property
def npol(self):
return self.data.shape[1]
@property
def nheight(self):
return self.data.shape[2]
@property
def nwidth(self):
return self.data.shape[3]
@property
def frequency(self):
w = self.wcs.sub(['spectral'])
return w.wcs_pix2world(range(self.nchan), 0)[0]
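# A minimal sketch of the spectral lookup above (assuming the WCS spectral
# axis is in Hz): sub-selecting the 'spectral' axis and converting pixel
# indices 0..nchan-1 to world coordinates yields one frequency per channel,
# e.g. for nchan=3 and a 1 MHz channel grid starting at 100 MHz:
#
#   w.wcs_pix2world([0, 1, 2], 0)[0]  ->  [1.00e8, 1.01e8, 1.02e8]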
@property
def shape(self):
return self.data.shape
@property
def phasecentre(self):
return SkyCoord(self.wcs.wcs.crval[0] * u.deg, self.wcs.wcs.crval[1] * u.deg)
def __str__(self):
"""Default printer for Image
"""
s = "Image:\n"
s += "\tShape: %s\n" % str(self.data.shape)
s += "\tWCS: %s\n" % self.wcs.__repr__()
s += "\tPolarisation frame: %s\n" % str(self.polarisation_frame.type)
return s
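# Added illustration (not part of the original module): a minimal sketch of
# the canonical [nchan, npol, ny, nx] layout and the reversed axis order in
# the WCS. It assumes astropy is installed; all axis values are illustrative.
def _example_canonical_image():
    from astropy.wcs import WCS
    im = Image()
    im.data = numpy.zeros([2, 1, 8, 8])  # numpy order: [nchan, npol, ny, nx]
    w = WCS(naxis=4)
    # WCS axes run opposite to numpy: (RA, DEC, STOKES, FREQ)
    w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'STOKES', 'FREQ']
    w.wcs.crval = [15.0, -45.0, 1.0, 1.0e8]
    w.wcs.crpix = [5.0, 5.0, 1.0, 1.0]
    w.wcs.cdelt = [-0.001, 0.001, 1.0, 1.0e6]
    im.wcs = w
    im.polarisation_frame = PolarisationFrame('stokesI')
    return im.nchan, im.npol, im.frequency  # (2, 1, array([1.0e8, 1.01e8]))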
class GridData:
"""Class to hold Gridded data for Fourier processing
- Has five coordinates: [chan, pol, z, y, x] where x can be u, l; y can be v, m; z can be w, n
The conventions for indexing in WCS and numpy are opposite.
- In astropy.wcs, the order is (longitude, latitude, polarisation, frequency)
- In numpy, the order is (frequency, polarisation, depth, latitude, longitude)
.. warning::
The polarisation_frame is kept in two places, the WCS and the polarisation_frame
variable. The latter should be considered definitive.
"""
def __init__(self):
""" Empty image
"""
self.data = None
self.grid_wcs = None
self.projection_wcs = None
self.polarisation_frame = None
def size(self):
""" Return size in GB
"""
size = 0
size += self.data.nbytes
return size / 1024.0 / 1024.0 / 1024.0
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# noinspection PyArgumentList
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
@property
def nchan(self):
return self.data.shape[0]
@property
def npol(self):
return self.data.shape[1]
@property
def nheight(self):
return self.data.shape[2]
@property
def nwidth(self):
return self.data.shape[3]
@property
def ndepth(self):
return self.data.shape[4]
@property
def frequency(self):
w = self.grid_wcs.sub(['spectral'])
return w.wcs_pix2world(range(self.nchan), 0)[0]
@property
def shape(self):
assert len(self.data.shape) == 5
return self.data.shape
@property
def phasecentre(self):
return SkyCoord(self.projection_wcs.wcs.crval[0] * u.deg, self.projection_wcs.wcs.crval[1] * u.deg)
def __str__(self):
"""Default printer for GriddedData
"""
s = "Gridded data:\n"
s += "\tShape: %s\n" % str(self.data.shape)
s += "\tGrid WCS: %s\n" % self.grid_wcs
s += "\tProjection WCS: %s\n" % self.projection_wcs
s += "\tPolarisation frame: %s\n" % str(self.polarisation_frame.type)
return s
class ConvolutionFunction:
"""Class to hold Gridded data for Fourier processing
- Has four or more coordinates: [chan, pol, z, y, x] where x can be u, l; y can be v, m; z can be w, n
The conventions for indexing in WCS and numpy are opposite.
- In astropy.wcs, the order is (longitude, latitude, polarisation, frequency)
- In numpy, the order is (frequency, polarisation, depth, latitude, longitude)
.. warning::
The polarisation_frame is kept in two places, the WCS and the polarisation_frame
variable. The latter should be considered definitive.
"""
def __init__(self):
""" Empty image
"""
self.data = None
self.grid_wcs = None
self.projection_wcs = None
self.polarisation_frame = None
def size(self):
""" Return size in GB
"""
size = 0
size += self.data.nbytes
return size / 1024.0 / 1024.0 / 1024.0
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# noinspection PyArgumentList
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
@property
def nchan(self):
return self.data.shape[0]
@property
def npol(self):
return self.data.shape[1]
@property
def nheight(self):
return self.data.shape[2]
@property
def nwidth(self):
return self.data.shape[3]
@property
def ndepth(self):
return self.data.shape[4]
@property
def frequency(self):
w = self.grid_wcs.sub(['spectral'])
return w.wcs_pix2world(range(self.nchan), 0)[0]
@property
def shape(self):
assert len(self.data.shape) == 7
return self.data.shape
@property
def phasecentre(self):
return SkyCoord(self.projection_wcs.wcs.crval[0] * u.deg, self.projection_wcs.wcs.crval[1] * u.deg)
def __str__(self):
"""Default printer for GriddedData
"""
s = "Convolution function:\n"
s += "\tShape: %s\n" % str(self.data.shape)
s += "\tGrid WCS: %s\n" % self.grid_wcs
s += "\tProjection WCS: %s\n" % self.projection_wcs
s += "\tPolarisation frame: %s\n" % str(self.polarisation_frame.type)
return s
class Skycomponent:
"""Skycomponents are used to represent compact sources on the sky. They possess direction,
flux as a function of frequency and polarisation, shape (with params), and polarisation frame.
For example, the following creates and predicts the visibility from a collection of point sources
drawn from the GLEAM catalog::
sc = create_low_test_skycomponents_from_gleam(flux_limit=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"),
frequency=frequency, kind='cubic',
phasecentre=phasecentre,
radius=0.1)
model = create_image_from_visibility(vis, cellsize=0.001, npixel=512, frequency=frequency,
polarisation_frame=PolarisationFrame('stokesIQUV'))
bm = create_low_test_beam(model=model)
sc = apply_beam_to_skycomponent(sc, bm)
vis = predict_skycomponent_visibility(vis, sc)
"""
def __init__(self,
direction=None, frequency=None, name=None, flux=None, shape='Point',
polarisation_frame=PolarisationFrame('stokesIQUV'), params=None):
""" Define the required structure
:param direction: SkyCoord
:param frequency: numpy.array [nchan]
:param name: user friendly name
:param flux: numpy.array [nchan, npol]
:param shape: str e.g. 'Point' 'Gaussian'
:param params: dict of shape-dependent parameters
:param polarisation_frame: Polarisation_frame
"""
self.direction = direction
self.frequency = numpy.array(frequency)
self.name = name
self.flux = numpy.array(flux)
self.shape = shape
if params is None:
params = {}
self.params = params
self.polarisation_frame = polarisation_frame
assert len(self.frequency.shape) == 1, frequency
assert len(self.flux.shape) == 2, flux
assert self.frequency.shape[0] == self.flux.shape[0], \
"Frequency shape %s, flux shape %s" % (self.frequency.shape, self.flux.shape)
assert polarisation_frame.npol == self.flux.shape[1], \
"Polarisation is %s, flux shape %s" % (polarisation_frame.type, self.flux.shape)
@property
def nchan(self):
return self.flux.shape[0]
@property
def npol(self):
return self.flux.shape[1]
def __str__(self):
"""Default printer for Skycomponent
"""
s = "Skycomponent:\n"
s += "\tName: %s\n" % self.name
s += "\tFlux: %s\n" % self.flux
s += "\tFrequency: %s\n" % self.frequency
s += "\tDirection: %s\n" % self.direction
s += "\tShape: %s\n" % self.shape
s += "\tParams: %s\n" % self.params
s += "\tPolarisation frame: %s\n" % str(self.polarisation_frame.type)
return s
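# Added illustration: a minimal sketch constructing a single-channel Stokes-I
# point component directly, rather than through the GLEAM helper used in the
# class docstring. The name and coordinates are illustrative only.
def _example_point_component():
    return Skycomponent(direction=SkyCoord(ra=15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs'),
                        frequency=numpy.array([1.0e8]),
                        name='demo_source',
                        flux=numpy.array([[1.0]]),  # [nchan=1, npol=1]
                        shape='Point',
                        polarisation_frame=PolarisationFrame('stokesI'))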
class SkyModel:
""" A model for the sky, including an image, components, gaintable and a mask
"""
def __init__(self, image=None, components=None, gaintable=None, mask=None, fixed=False):
""" A model of the sky as an image, components, gaintable and a mask
Use copy_skymodel to make a proper copy of skymodel
"""
if components is None:
components = []
if image is not None:
assert isinstance(image, Image), image
self.image = image
if components is not None:
assert isinstance(components, list)
for comp in components:
assert isinstance(comp, Skycomponent), comp
self.components = [sc for sc in components]
if gaintable is not None:
assert isinstance(gaintable, GainTable), gaintable
self.gaintable = gaintable
if mask is not None:
assert isinstance(mask, Image), mask
self.mask = mask
self.fixed = fixed
def __str__(self):
"""Default printer for SkyModel
"""
s = "SkyModel: fixed: %s\n" % self.fixed
for i, sc in enumerate(self.components):
s += str(sc)
s += "\n"
s += str(self.image)
s += "\n"
s += str(self.mask)
s += "\n"
s += str(self.gaintable)
return s
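# Added illustration: a minimal SkyModel carrying just the point component
# from the sketch above; image, gaintable and mask are optional and omitted.
# Use copy_skymodel (defined elsewhere in the library) for real copies.
def _example_skymodel():
    return SkyModel(components=[_example_point_component()], fixed=False)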
class Visibility:
""" Visibility table class
Visibility with uvw, time, integration_time, frequency, channel_bandwidth, a1, a2, vis, weight
as separate columns in a numpy structured array. The fundamental unit is a complex vector of polarisation.
Visibility is defined to hold an observation with one direction.
Polarisation frame is the same for the entire data set and can be stokes, circular, linear
The configuration is also an attribute
The phasecentre is the direction of delay tracking i.e. n=0. If uvw are rotated then this
should be updated with the new delay tracking centre. This is important for wstack and wproject
algorithms.
If a visibility is created by coalescence then the cindex column is filled with a pointer to the
row in the original block visibility that this row has a value for. The original blockvisibility
is also preserved as an attribute so that decoalescence is expedited. If you don't need that then
the storage can be released by setting self.blockvis to None
"""
def __init__(self,
data=None, frequency=None, channel_bandwidth=None,
phasecentre=None, configuration=None, uvw=None,
time=None, antenna1=None, antenna2=None, vis=None,
weight=None, imaging_weight=None, integration_time=None,
polarisation_frame=PolarisationFrame('stokesI'), cindex=None,
blockvis=None, source='anonymous', meta=None):
"""Visibility
:param data:
:param frequency:
:param channel_bandwidth:
:param phasecentre:
:param configuration:
:param uvw:
:param time:
:param antenna1:
:param antenna2:
:param vis:
:param weight:
:param imaging_weight:
:param integration_time:
:param polarisation_frame:
:param cindex:
:param blockvis:
:param source:
:param meta:
"""
if data is None and vis is not None:
if imaging_weight is None:
imaging_weight = weight
nvis = vis.shape[0]
assert len(time) == nvis
assert len(frequency) == nvis
assert len(channel_bandwidth) == nvis
assert len(antenna1) == nvis
assert len(antenna2) == nvis
npol = polarisation_frame.npol
desc = [('index', 'i8'),
('uvw', 'f8', (3,)),
('time', 'f8'),
('frequency', 'f8'),
('channel_bandwidth', 'f8'),
('integration_time', 'f8'),
('antenna1', 'i8'),
('antenna2', 'i8'),
('vis', 'c16', (npol,)),
('weight', 'f8', (npol,)),
('imaging_weight', 'f8', (npol,))]
data = numpy.zeros(shape=[nvis], dtype=desc)
data['index'] = list(range(nvis))
data['uvw'] = uvw
data['time'] = time
data['frequency'] = frequency
data['channel_bandwidth'] = channel_bandwidth
data['integration_time'] = integration_time
data['antenna1'] = antenna1
data['antenna2'] = antenna2
data['vis'] = vis
data['weight'] = weight
data['imaging_weight'] = imaging_weight
self.data = data # numpy structured array
self.cindex = cindex
self.blockvis = blockvis
self.phasecentre = phasecentre # Phase centre of observation
self.configuration = configuration # Antenna/station configuration
self.polarisation_frame = polarisation_frame
self.frequency_map = None
self.source = source
self.meta = meta
def __str__(self):
"""Default printer for Visibility
"""
ufrequency = numpy.unique(self.frequency)
uchannel_bandwidth = numpy.unique(self.channel_bandwidth)
s = "Visibility:\n"
s += "\tSource: %s\n" % self.source
s += "\tNumber of visibilities: %s\n" % self.nvis
s += "\tNumber of channels: %d\n" % len(ufrequency)
s += "\tFrequency: %s\n" % ufrequency
s += "\tChannel bandwidth: %s\n" % uchannel_bandwidth
s += "\tNumber of polarisations: %s\n" % self.npol
s += "\tVisibility shape: %s\n" % str(self.vis.shape)
s += "\tPolarisation Frame: %s\n" % self.polarisation_frame.type
s += "\tPhasecentre: %s\n" % self.phasecentre
s += "\tConfiguration: %s\n" % self.configuration.name
s += "\tMetadata: %s\n" % self.meta
return s
def size(self):
""" Return size in GB
"""
size = 0
for col in self.data.dtype.fields.keys():
size += self.data[col].nbytes
return size / 1024.0 / 1024.0 / 1024.0
@property
def index(self):
return self.data['index']
@property
def npol(self):
return self.polarisation_frame.npol
@property
def nvis(self):
return self.data['vis'].shape[0]
@property
def uvw(self): # In wavelengths in Visibility
return self.data['uvw']
@property
def u(self):
return self.data['uvw'][:, 0]
@property
def v(self):
return self.data['uvw'][:, 1]
@property
def w(self):
return self.data['uvw'][:, 2]
@property
def uvdist(self):
return numpy.hypot(self.u, self.v)
@property
def uvwdist(self):
# numpy.hypot takes only two input arrays (a third argument is the output
# buffer), so compute the 3D baseline length explicitly
return numpy.sqrt(self.u ** 2 + self.v ** 2 + self.w ** 2)
@property
def time(self):
return self.data['time']
@property
def integration_time(self):
return self.data['integration_time']
@property
def frequency(self):
return self.data['frequency']
@property
def channel_bandwidth(self):
return self.data['channel_bandwidth']
@property
def antenna1(self):
return self.data['antenna1']
@property
def antenna2(self):
return self.data['antenna2']
@property
def vis(self):
return self.data['vis']
@property
def weight(self):
return self.data['weight']
@property
def imaging_weight(self):
return self.data['imaging_weight']
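# Added illustration: a minimal sketch exercising the column-wise constructor
# path (data=None, vis=...) with a two-row Stokes-I table. Note that
# frequency and channel_bandwidth are per-row in Visibility. Values are
# illustrative only.
def _example_visibility():
    nvis, npol = 2, 1
    return Visibility(uvw=numpy.zeros([nvis, 3]),
                      time=numpy.array([0.0, 10.0]),
                      frequency=numpy.array([1.0e8, 1.0e8]),
                      channel_bandwidth=numpy.array([1.0e6, 1.0e6]),
                      integration_time=numpy.array([10.0, 10.0]),
                      antenna1=numpy.array([0, 0]),
                      antenna2=numpy.array([1, 2]),
                      vis=numpy.ones([nvis, npol], dtype='complex'),
                      weight=numpy.ones([nvis, npol]),
                      polarisation_frame=PolarisationFrame('stokesI'))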
class BlockVisibility:
""" Block Visibility table class
BlockVisibility with uvw, time, integration_time, frequency, channel_bandwidth, pol,
a1, a2, vis, weight Columns in a numpy structured array.
BlockVisibility is defined to hold an observation with one direction.
The phasecentre is the direction of delay tracking i.e. n=0. If uvw are rotated then this
should be updated with the new delay tracking centre. This is important for wstack and wproject
algorithms.
Polarisation frame is the same for the entire data set and can be stokesI, circular, linear
The configuration is also an attribute
"""
def __init__(self,
data=None, frequency=None, channel_bandwidth=None,
phasecentre=None, configuration=None, uvw=None,
time=None, vis=None, weight=None, integration_time=None,
polarisation_frame=PolarisationFrame('stokesI'),
imaging_weight=None, source='anonymous', meta=None):
"""BlockVisibility
:param data:
:param frequency:
:param channel_bandwidth:
:param phasecentre:
:param configuration:
:param uvw:
:param time:
:param vis:
:param weight:
:param integration_time:
:param polarisation_frame:
:param imaging_weight:
:param source:
:param meta:
"""
if data is None and vis is not None:
ntimes, nants, _, nchan, npol = vis.shape
assert vis.shape == weight.shape
assert len(frequency) == nchan
assert len(channel_bandwidth) == nchan
desc = [('index', 'i8'),
('uvw', 'f8', (nants, nants, 3)),
('time', 'f8'),
('integration_time', 'f8'),
('vis', 'c16', (nants, nants, nchan, npol)),
('weight', 'f8', (nants, nants, nchan, npol)),
('imaging_weight', 'f8', (nants, nants, nchan, npol))]
data = numpy.zeros(shape=[ntimes], dtype=desc)
data['index'] = list(range(ntimes))
data['uvw'] = uvw
data['time'] = time # MJD in seconds
data['integration_time'] = integration_time # seconds
data['vis'] = vis
data['weight'] = weight
data['imaging_weight'] = imaging_weight
self.data = data # numpy structured array
self.frequency = frequency
self.channel_bandwidth = channel_bandwidth
self.phasecentre = phasecentre # Phase centre of observation
self.configuration = configuration # Antenna/station configuration
self.polarisation_frame = polarisation_frame
self.source = source
self.meta = meta if meta is not None else {}  # avoid sharing a mutable default
def __str__(self):
"""Default printer for BlockVisibility
"""
s = "BlockVisibility:\n"
s += "\tSource %s\n" % self.source
s += "\tPhasecentre: %s\n" % self.phasecentre
s += "\tNumber of visibilities: %s\n" % self.nvis
s += "\tNumber of integrations: %s\n" % len(self.time)
s += "\tVisibility shape: %s\n" % str(self.vis.shape)
s += "\tNumber of channels: %d\n" % len(self.frequency)
s += "\tFrequency: %s\n" % self.frequency
s += "\tChannel bandwidth: %s\n" % self.channel_bandwidth
s += "\tNumber of polarisations: %s\n" % self.npol
s += "\tPolarisation Frame: %s\n" % self.polarisation_frame.type
s += "\tConfiguration: %s\n" % self.configuration.name
s += "\tMetadata: %s\n" % self.meta
return s
def size(self):
""" Return size in GB
"""
size = 0
for col in self.data.dtype.fields.keys():
size += self.data[col].nbytes
return size / 1024.0 / 1024.0 / 1024.0
@property
def nchan(self):
return self.data['vis'].shape[3]
@property
def npol(self):
return self.data['vis'].shape[4]
@property
def nants(self):
return self.data['vis'].shape[1]
@property
def uvw(self): # In meters
return self.data['uvw']
@property
def u(self):
return self.data['uvw'][..., 0]
@property
def v(self):
return self.data['uvw'][..., 1]
@property
def w(self):
return self.data['uvw'][..., 2]
@property
def uvdist(self):
return numpy.hypot(self.u, self.v)
@property
def uvwdist(self):
# numpy.hypot takes only two input arrays (a third argument is the output
# buffer), so compute the 3D baseline length explicitly
return numpy.sqrt(self.u ** 2 + self.v ** 2 + self.w ** 2)
@property
def vis(self):
return self.data['vis']
@property
def weight(self):
return self.data['weight']
@property
def time(self):
return self.data['time']
@property
def integration_time(self):
return self.data['integration_time']
@property
def nvis(self):
return self.data.size
@property
def imaging_weight(self):
return self.data['imaging_weight']
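# Added illustration: the BlockVisibility counterpart of the sketch above.
# Here uvw and vis are indexed per integration over [nants, nants, ...] and
# frequency is a single per-dataset axis. Values are illustrative only.
def _example_blockvisibility():
    ntimes, nants, nchan, npol = 1, 3, 1, 1
    vis = numpy.zeros([ntimes, nants, nants, nchan, npol], dtype='complex')
    return BlockVisibility(uvw=numpy.zeros([ntimes, nants, nants, 3]),
                           time=numpy.array([0.0]),
                           integration_time=numpy.array([10.0]),
                           vis=vis,
                           weight=numpy.ones(vis.shape),
                           imaging_weight=numpy.ones(vis.shape),
                           frequency=numpy.array([1.0e8]),
                           channel_bandwidth=numpy.array([1.0e6]),
                           polarisation_frame=PolarisationFrame('stokesI'))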
class QA:
""" Quality assessment
"""
def __init__(self, origin=None, data=None, context=None):
"""QA
:param origin:
:param data:
:param context:
"""
self.origin = origin # Name of function originating QA assessment
self.data = data # Dictionary containing standard fields
self.context = context # Context string
def __str__(self):
"""Default printer for QA
"""
s = "Quality assessment:\n"
s += "\tOrigin: %s\n" % self.origin
s += "\tContext: %s\n" % self.context
s += "\tData:\n"
for dataname in self.data.keys():
s += "\t\t%s: %r\n" % (dataname, str(self.data[dataname]))
return s
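# Added illustration: QA is a plain record, so any dict of named statistics
# can be carried; the field names below are illustrative only.
def _example_qa():
    return QA(origin='example_imaging_step', context='demo run',
              data={'rms': 0.01, 'maxabs': 1.2})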
class ScienceDataModel:
""" Science Data Model"""
def __init__(self):
pass
def __str__(self):
""" Deflaut printer for Science Data Model
:return:
"""
return ""
def assert_same_chan_pol(o1, o2):
"""
Assert that two entities indexed over channels and polarisations
have the same number of them.
"""
assert o1.npol == o2.npol, \
"%s and %s have different number of polarisations: %d != %d" % \
(type(o1).__name__, type(o2).__name__, o1.npol, o2.npol)
if isinstance(o1, BlockVisibility) and isinstance(o2, BlockVisibility):
assert o1.nchan == o2.nchan, \
"%s and %s have different number of channels: %d != %d" % \
(type(o1).__name__, type(o2).__name__, o1.nchan, o2.nchan)
def assert_vis_gt_compatible(vis: Union[Visibility, BlockVisibility], gt: GainTable):
""" Check if visibility and gaintable are compatible
:param vis:
:param gt:
:return:
"""
assert vis.nchan == gt.nchan
assert vis.npol == gt.nrec * gt.nrec
|
SKA-ScienceDataProcessor/algorithm-reference-library
|
data_models/memory_data_models.py
|
Python
|
apache-2.0
| 35,339
|
[
"Gaussian"
] |
a1b25c8edfafd76f3573353645a6e5b902c9c0e0ebc60317b095289d06adfc97
|
import pytest
import numpy as np
from math import floor
from conftest import opts_tiling
from devito import (Grid, Function, TimeFunction, Eq, solve, Operator, SubDomain,
SubDomainSet, Dimension)
from devito.ir import FindNodes, Expression
from devito.tools import timed_region
class TestSubdomains(object):
"""
Class for testing SubDomains
"""
def test_subdomain_dim(self):
"""
Test that all dimensions including ones used as an expression
are replaced by the subdimension dimensions.
"""
class sd0(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 1, 6), y: ('middle', 1, 1)}
s_d0 = sd0()
grid = Grid(shape=(10, 10), subdomains=(s_d0,))
x, y = grid.dimensions
x1, y1 = s_d0.dimensions
f = Function(name='f', grid=grid, dtype=np.int32)
eq0 = Eq(f, x*f+y, subdomain=grid.subdomains['d0'])
with timed_region('x'):
expr = Operator._lower_exprs([eq0])[0]
assert expr.rhs == x1 * f[x1 + 1, y1 + 1] + y1
def test_multiple_middle(self):
"""
Test Operator with two basic 'middle' subdomains defined.
"""
class sd0(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 1, 6), y: ('middle', 1, 1)}
s_d0 = sd0()
class sd1(SubDomain):
name = 'd1'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 6, 1), y: ('middle', 1, 1)}
s_d1 = sd1()
grid = Grid(shape=(10, 10), subdomains=(s_d0, s_d1))
f = Function(name='f', grid=grid, dtype=np.int32)
eq0 = Eq(f, f+1, subdomain=grid.subdomains['d0'])
eq1 = Eq(f, f+2, subdomain=grid.subdomains['d1'])
Operator([eq0, eq1])()
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2, 2, 2, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)
assert((np.array(f.data) == expected).all())
def test_shape(self):
class sd0(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 1, 6), y: ('middle', 1, 1)}
s_d0 = sd0()
class sd1(SubDomain):
name = 'd1'
def define(self, dimensions):
x, y = dimensions
return {x: ('right', 4), y: ('left', 2)}
s_d1 = sd1()
class sd2(SubDomain):
name = 'd2'
def define(self, dimensions):
x, y = dimensions
return {x: ('left', 3), y: ('middle', 1, 2)}
s_d2 = sd2()
grid = Grid(shape=(10, 10), subdomains=(s_d0, s_d1, s_d2))
assert grid.subdomains['domain'].shape == (10, 10)
assert grid.subdomains['interior'].shape == (8, 8)
assert grid.subdomains['d0'].shape == (3, 8)
assert grid.subdomains['d1'].shape == (4, 2)
assert grid.subdomains['d2'].shape == (3, 7)
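# Added illustration (pure Python, no Devito required): the arithmetic behind
# the shape assertions above. On a dimension of size n, ('middle', l, r)
# keeps n - l - r points, while ('left', k) and ('right', k) keep k points.
def _side_size(n, spec):
    side, args = spec[0], spec[1:]
    if side == 'middle':
        return n - args[0] - args[1]
    return args[0]  # 'left' or 'right'
# e.g. _side_size(10, ('middle', 1, 6)) == 3 and _side_size(10, ('right', 4)) == 4,
# matching the d0 and d1 shapes checked in test_shape.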
def test_definitions(self):
class sd0(SubDomain):
name = 'sd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 2, 2), y: ('right', 10)}
class sd1(SubDomain):
name = 'sd1'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 2, 2), y: ('left', 10)}
class sd2(SubDomain):
name = 'sd2'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 2, 2), y: y}
class sd3(SubDomain):
name = 'sd3'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 2, 2), y: ('middle', 0, 0)}
sd_def0 = sd0()
sd_def1 = sd1()
sd_def2 = sd2()
sd_def3 = sd3()
grid = Grid(shape=(10, 10), extent=(10, 10),
subdomains=(sd_def0, sd_def1, sd_def2, sd_def3))
u0 = Function(name='u0', grid=grid)
u1 = Function(name='u1', grid=grid)
u2 = Function(name='u2', grid=grid)
u3 = Function(name='u3', grid=grid)
eq0 = Eq(u0, u0+1, subdomain=grid.subdomains['sd0'])
eq1 = Eq(u1, u1+1, subdomain=grid.subdomains['sd1'])
eq2 = Eq(u2, u2+1, subdomain=grid.subdomains['sd2'])
eq3 = Eq(u3, u3+1, subdomain=grid.subdomains['sd3'])
Operator([eq0, eq1, eq2, eq3])()
assert u0.data.all() == u1.data.all() == u2.data.all() == u3.data.all()
@pytest.mark.parametrize('opt', opts_tiling)
def test_iterate_NDomains(self, opt):
"""
Test that a set of subdomains are iterated upon correctly.
"""
n_domains = 10
class Inner(SubDomainSet):
name = 'inner'
bounds_xm = np.zeros((n_domains,), dtype=np.int32)
bounds_xM = np.zeros((n_domains,), dtype=np.int32)
bounds_ym = np.zeros((n_domains,), dtype=np.int32)
bounds_yM = np.zeros((n_domains,), dtype=np.int32)
for j in range(0, n_domains):
bounds_xm[j] = j
bounds_xM[j] = n_domains-1-j
bounds_ym[j] = floor(j/2)
bounds_yM[j] = floor(j/2)
bounds = (bounds_xm, bounds_xM, bounds_ym, bounds_yM)
inner_sd = Inner(N=n_domains, bounds=bounds)
grid = Grid(extent=(10, 10), shape=(10, 10), subdomains=(inner_sd, ))
f = TimeFunction(name='f', grid=grid, dtype=np.int32)
f.data[:] = 0
stencil = Eq(f.forward, solve(Eq(f.dt, 1), f.forward),
subdomain=grid.subdomains['inner'])
op = Operator(stencil, opt=opt)
op(time_m=0, time_M=9, dt=1)
result = f.data[0]
expected = np.zeros((10, 10), dtype=np.int32)
for j in range(0, n_domains):
expected[j, bounds_ym[j]:n_domains-bounds_yM[j]] = 10
assert((np.array(result) == expected).all())
def test_multi_eq(self):
"""
Test SubDomainSet functionality when multiple equations are
present.
"""
Nx = 10
Ny = Nx
n_domains = 2
class MySubdomains(SubDomainSet):
name = 'mydomains'
bounds_xm = np.array([1, Nx/2+1], dtype=np.int32)
bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32)
bounds_ym = 1
bounds_yM = 1
bounds = (bounds_xm, bounds_xM, bounds_ym, bounds_yM)
my_sd = MySubdomains(N=n_domains, bounds=bounds)
grid = Grid(extent=(Nx, Ny), shape=(Nx, Ny), subdomains=(my_sd, ))
assert(grid.subdomains['mydomains'].shape == ((3, 8), (3, 8)))
f = Function(name='f', grid=grid, dtype=np.int32)
g = Function(name='g', grid=grid, dtype=np.int32)
h = Function(name='h', grid=grid, dtype=np.int32)
eq1 = Eq(f, f+1, subdomain=grid.subdomains['mydomains'])
eq2 = Eq(g, g+1)
eq3 = Eq(h, h+2, subdomain=grid.subdomains['mydomains'])
op = Operator([eq1, eq2, eq3])
op.apply()
expected1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)
expected2 = np.full((10, 10), 1, dtype=np.int32)
expected3 = 2*expected1
assert((np.array(f.data) == expected1).all())
assert((np.array(g.data) == expected2).all())
assert((np.array(h.data) == expected3).all())
# Also make sure the Functions carrying the subdomain bounds are
# unique -- see issue #1474
exprs = FindNodes(Expression).visit(op)
reads = set().union(*[e.reads for e in exprs])
assert len(reads) == 7 # f, g, h, xi_n_m, xi_n_M, yi_n_m, yi_n_M
def test_multi_sets(self):
"""
Check functionality for when multiple subdomain sets are present.
"""
Nx = 10
Ny = Nx
n_domains = 2
n = Dimension(name='n')
m = Dimension(name='m')
class MySubdomains1(SubDomainSet):
name = 'mydomains1'
implicit_dimension = n
class MySubdomains2(SubDomainSet):
name = 'mydomains2'
implicit_dimension = m
bounds_xm = np.array([1, Nx/2+1], dtype=np.int32)
bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32)
bounds_ym = int(1)
bounds_yM = int(Ny/2+1)
bounds1 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM)
bounds_xm = np.array([1, Nx/2+1], dtype=np.int32)
bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32)
bounds_ym = int(Ny/2+1)
bounds_yM = int(1)
bounds2 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM)
my_sd1 = MySubdomains1(N=n_domains, bounds=bounds1)
my_sd2 = MySubdomains2(N=n_domains, bounds=bounds2)
grid = Grid(extent=(Nx, Ny), shape=(Nx, Ny), subdomains=(my_sd1, my_sd2))
f = Function(name='f', grid=grid, dtype=np.int32)
g = Function(name='g', grid=grid, dtype=np.int32)
eq1 = Eq(f, f+1, subdomain=grid.subdomains['mydomains1'])
eq2 = Eq(g, g+2, subdomain=grid.subdomains['mydomains2'])
op = Operator([eq1, eq2])
op.apply()
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 2, 2, 2, 0],
[0, 1, 1, 1, 0, 0, 2, 2, 2, 0],
[0, 1, 1, 1, 0, 0, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 2, 2, 2, 0],
[0, 1, 1, 1, 0, 0, 2, 2, 2, 0],
[0, 1, 1, 1, 0, 0, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)
assert((np.array(f.data[:]+g.data[:]) == expected).all())
@pytest.mark.parallel(mode=[(4, 'basic'), (4, 'overlap')])
def test_subdomainset_mpi(self):
n_domains = 5
class Inner(SubDomainSet):
name = 'inner'
bounds_xm = np.zeros((n_domains,), dtype=np.int32)
bounds_xM = np.zeros((n_domains,), dtype=np.int32)
bounds_ym = np.zeros((n_domains,), dtype=np.int32)
bounds_yM = np.zeros((n_domains,), dtype=np.int32)
for j in range(0, n_domains):
bounds_xm[j] = j
bounds_xM[j] = j
bounds_ym[j] = j
bounds_yM[j] = 2*n_domains-1-j
bounds = (bounds_xm, bounds_xM, bounds_ym, bounds_yM)
inner_sd = Inner(N=n_domains, bounds=bounds)
grid = Grid(extent=(10, 10), shape=(10, 10), subdomains=(inner_sd, ))
assert(grid.subdomains['inner'].shape == ((10, 1), (8, 1), (6, 1),
(4, 1), (2, 1)))
f = TimeFunction(name='f', grid=grid, dtype=np.int32)
f.data[:] = 0
stencil = Eq(f.forward, solve(Eq(f.dt, 1), f.forward),
subdomain=grid.subdomains['inner'])
op = Operator(stencil)
op(time_m=0, time_M=9, dt=1)
result = f.data[0]
fex = Function(name='fex', grid=grid)
expected = np.zeros((10, 10), dtype=np.int32)
for j in range(0, n_domains):
expected[j, j:10-j] = 10
fex.data[:] = np.transpose(expected)
assert((np.array(result) == np.array(fex.data[:])).all())
def test_multi_sets_eq(self):
"""
Check functionality for when multiple subdomain sets are present, each
with multiple equations.
"""
Nx = 10
Ny = Nx
n_domains = 2
n = Dimension(name='n')
m = Dimension(name='m')
class MySubdomains1(SubDomainSet):
name = 'mydomains1'
implicit_dimension = n
class MySubdomains2(SubDomainSet):
name = 'mydomains2'
implicit_dimension = m
bounds_xm = np.array([1, Nx/2+1], dtype=np.int32)
bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32)
bounds_ym = int(1)
bounds_yM = int(Ny/2+1)
bounds1 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM)
bounds_xm = np.array([1, Nx/2+1], dtype=np.int32)
bounds_xM = np.array([Nx/2+1, 1], dtype=np.int32)
bounds_ym = int(Ny/2+1)
bounds_yM = int(1)
bounds2 = (bounds_xm, bounds_xM, bounds_ym, bounds_yM)
my_sd1 = MySubdomains1(N=n_domains, bounds=bounds1)
my_sd2 = MySubdomains2(N=n_domains, bounds=bounds2)
grid = Grid(extent=(Nx, Ny), shape=(Nx, Ny), subdomains=(my_sd1, my_sd2))
f = Function(name='f', grid=grid, dtype=np.int32)
g = Function(name='g', grid=grid, dtype=np.int32)
eq1 = Eq(f, f+2, subdomain=grid.subdomains['mydomains1'])
eq2 = Eq(g, g+2, subdomain=grid.subdomains['mydomains2'])
eq3 = Eq(f, f-1, subdomain=grid.subdomains['mydomains1'])
eq4 = Eq(g, g+1, subdomain=grid.subdomains['mydomains2'])
op = Operator([eq1, eq2, eq3, eq4])
op.apply()
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 3, 3, 3, 0],
[0, 1, 1, 1, 0, 0, 3, 3, 3, 0],
[0, 1, 1, 1, 0, 0, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 3, 3, 3, 0],
[0, 1, 1, 1, 0, 0, 3, 3, 3, 0],
[0, 1, 1, 1, 0, 0, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)
assert((np.array(f.data[:]+g.data[:]) == expected).all())
|
opesci/devito
|
tests/test_subdomains.py
|
Python
|
mit
| 14,942
|
[
"VisIt"
] |
77ce1574c699b199c1d6cae89112d13250af34c806a670284b5bcb06ca1db6d9
|
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from io import BytesIO
import copy
import unittest
import numpy
# Enthought library imports
from mayavi.core.null_engine import NullEngine
from mayavi.sources.vtk_xml_file_reader import VTKXMLFileReader
from mayavi.modules.outline import Outline
from mayavi.modules.contour_grid_plane import ContourGridPlane
from mayavi.modules.scalar_cut_plane import ScalarCutPlane
# Local imports.
from .common import get_example_data
class TestVTKXMLReader(unittest.TestCase):
def setUp(self):
e = NullEngine()
# Uncomment to see visualization for debugging etc.
#e = Engine()
e.start()
e.new_scene()
self.e=e
# Read a VTK XML data file.
r = VTKXMLFileReader()
r.initialize(get_example_data('cube.vti'))
e.add_source(r)
# Create an outline for the data.
o = Outline()
e.add_module(o)
# Create one ContourGridPlane normal to the 'x' axis.
cgp1 = ContourGridPlane()
e.add_module(cgp1)
# Set the position to the middle of the data.
cgp1.grid_plane.position = 1
# Another with filled contours normal to 'y' axis.
cgp2 = ContourGridPlane()
cgp2.contour.filled_contours = True
# Set the axis and position to the middle of the data.
cgp2.grid_plane.axis = 'y'
cgp2.grid_plane.position = 1
e.add_module(cgp2)
# An interactive scalar cut plane.
cp = ScalarCutPlane()
e.add_module(cp)
ip = cp.implicit_plane
ip.normal = 0,0,1
ip.origin = 0.5, 0.5, 1.0
# Since this is running offscreen this seems necessary.
ip.widget.origin = 0.5, 0.5, 1.0
ip.widget.enabled = False
self.scene = e.current_scene
self.cgp2=cgp2
self.cp=cp
return
def tearDown(self):
"""For necessary clean up, automatically called by TestCase after the test methods have been invoked"""
self.e.stop()
return
def check(self):
"""Do the actual testing."""
scene = self.scene
src = scene.children[0]
mm = src.children[0]
cgp1 = mm.children[1]
self.assertEqual(cgp1.grid_plane.position,1)
cgp2 = mm.children[2]
self.assertEqual(cgp2.contour.filled_contours,True)
self.assertEqual(cgp2.grid_plane.axis, 'y')
self.assertEqual(cgp2.grid_plane.position,1)
cp = mm.children[3]
ip = cp.implicit_plane
self.assertAlmostEqual(numpy.sum(ip.normal - (0, 0, 1)), 0.0)
self.assertAlmostEqual(numpy.sum(ip.origin - (0.5, 0.5, 1.0)), 0.0)
self.assertEqual(ip.widget.enabled,False)
def test_vtk_xml_reader(self):
"Test if the test fixture works"
#Now test.
self.check()
#from mayavi.tools.show import show
#show()
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
engine = self.e
scene = self.scene
# Save visualization.
f = BytesIO()
f.name = abspath('test.mv2') # We simulate a file.
engine.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine.close_scene(scene)
# Load visualization
engine.load_visualization(f)
self.scene = engine.current_scene
self.check()
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
# Pop the source object.
s = self.scene
source = s.children.pop()
# Add it back to see if that works without error.
s.children.append(source)
cp = source.children[0].children[-1]
cp.implicit_plane.widget.enabled = False
self.check()
# Now deepcopy the source and replace the existing one with
# the copy. This basically simulates cutting/copying the
# object from the UI via the right-click menu on the tree
# view, and pasting the copy back.
source1 = copy.deepcopy(source)
s.children[0] = source1
cp = source1.children[0].children[-1]
cp.implicit_plane.widget.enabled = False
self.check()
#from mayavi.tools.show import show
#show()
if __name__ == '__main__':
unittest.main()
|
dmsurti/mayavi
|
mayavi/tests/test_vtk_xml_reader.py
|
Python
|
bsd-3-clause
| 4,728
|
[
"Mayavi",
"VTK"
] |
2de86b7f5cf0ac41873192016dac02c4738dd74b8d53a729bb1459125e44615e
|
'''
Useful for:
* users learning xray
* building tutorials in the documentation.
'''
import os as _os
from .backends.api import open_dataset as _open_dataset
from .core.pycompat import urlretrieve as _urlretrieve
_default_cache_dir = _os.sep.join(('~', '.xray_tutorial_data'))
# idea borrowed from Seaborn
def load_dataset(name, cache=True, cache_dir=_default_cache_dir,
github_url='https://github.com/xray/xray-data', **kws):
"""
Load a dataset from the online repository (requires internet).
If a local copy is found then always use that to avoid network traffic.
Parameters
----------
name : str
Name of the netcdf file containing the dataset
ie. 'air_temperature'
cache_dir : string, optional
The directory in which to search for and write cached data.
cache : boolean, optional
If True, then cache data locally for use on subsequent calls
github_url : string
Github repository where the data is stored
kws : dict, optional
Passed to xray.open_dataset
"""
longdir = _os.path.expanduser(cache_dir)
fullname = name + '.nc'
localfile = _os.sep.join((longdir, fullname))
if not _os.path.exists(localfile):
# This will always leave this directory on disk.
# May want to add an option to remove it.
if not _os.path.isdir(longdir):
_os.mkdir(longdir)
url = '/'.join((github_url, 'raw', 'master', fullname))
_urlretrieve(url, localfile)
ds = _open_dataset(localfile, **kws).load()
if not cache:
_os.remove(localfile)
return ds
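# Added illustration: a minimal usage sketch. The first call needs internet
# and caches the file under cache_dir; later calls read the local copy.
def _example_load():
    return load_dataset('air_temperature')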
|
petercable/xray
|
xray/tutorial.py
|
Python
|
apache-2.0
| 1,638
|
[
"NetCDF"
] |
d9ed929161180e5a3c203bc5b82b51fee0e3b39e9eb2f0a82895ca043ce4cbba
|
import unittest
from mjhmc.samplers.algebraic_hmc import (StateGroup, AlgebraicDiscrete,
AlgebraicContinuous, AlgebraicReducedFlip,
AlgebraicHMC)
import itertools
import numpy as np
from mjhmc.misc.utils import overrides
n_seed = 1
# error tolerance
EPS = .01
class TestAlgebraicDiscrete(unittest.TestCase):
"""
Base test class for algebraic samplers
Inheritance in unittest is funky so I'm just setting the discrete sampler
to be the base class
"""
def setUp(self):
np.random.seed(n_seed)
self.n_sampler_steps = 5000
self.sampler_to_test = AlgebraicDiscrete
def test_analytic_transition_matrix(self):
"""
tests that the computed analytic matrix is within a tolerance to the empirical matrix
uses np.arange(order // 2) as the test distribution for the energies. Tests {}
""".format(self.sampler_to_test.__name__)
self.max_n_energies = 5
for order in (np.arange(2, self.max_n_energies) * 2):
sampler = self.sampler_to_test(
order,
energies=np.arange(order // 2)  # integer division: the size must be an int
)
sampler.sample(self.n_sampler_steps, burn_in=True)
analytic_T = sampler.calculate_true_transition_matrix(False)
empirical_T = sampler.get_empirical_transition_matrix()
self.assertTrue(
self.approx_equal(analytic_T, empirical_T),
"analytic matrix: \n {} is not within tolerance of empirical: \n {}. I am {}".format(
analytic_T, empirical_T, self.sampler_to_test.__name__
)
)
def approx_equal(self, arr1, arr2):
return np.linalg.norm(arr1 - arr2) < EPS
def get_distribution(self, energies):
"""
helper method that initializes the sampler with energies
and returns empirical_distr, theoretical_distribution
"""
sampler = self.sampler_to_test(
len(energies) * 2,
energies
)
sampler.sample(self.n_sampler_steps, burn_in=True)
return sampler.get_empirical_distr(), sampler.calculate_true_distribution()
def test_convergence(self):
"""
test that the algebraic samplers converge to the right distribution
"""
# 2d uniform
self.assertTrue(
self.approx_equal(*self.get_distribution(np.ones(2))),
msg="did not converge to 2d uniform distribution "
)
# 5d uniform
self.assertTrue(
self.approx_equal(*self.get_distribution(np.ones(5))),
msg="did not converge to 5d uniform distribution "
)
# 10d uniform
self.assertTrue(
self.approx_equal(*self.get_distribution(np.ones(10))),
msg="did not converge to 10d uniform distribution "
)
# 5d linear
self.assertTrue(
self.approx_equal(*self.get_distribution(np.arange(5))),
msg="did not converge to 5d linear distribution (np.arange(5)) "
)
# 5d randn
self.assertTrue(
self.approx_equal(*self.get_distribution(np.random.randn(5))),
msg="did not converge to 5d gaussian random "
)
class TestAlgebraicHMC(TestAlgebraicDiscrete):
@overrides(TestAlgebraicDiscrete)
def setUp(self):
np.random.seed(n_seed)
self.n_sampler_steps = 5000
self.sampler_to_test = AlgebraicHMC
class TestAlgebraicContinuous(TestAlgebraicDiscrete):
@overrides(TestAlgebraicDiscrete)
def setUp(self):
np.random.seed(n_seed)
self.n_sampler_steps = 5000
self.sampler_to_test = AlgebraicContinuous
class TestAlgebraicReducedFlip(TestAlgebraicDiscrete):
@overrides(TestAlgebraicDiscrete)
def setUp(self):
np.random.seed(n_seed)
self.n_sampler_steps = 5000
self.sampler_to_test = AlgebraicReducedFlip
class TestStateGroup(unittest.TestCase):
def test_full_indexing(self):
"""
test that self.full_idx is bijective
"""
max_order = 100
for order in (np.arange(2, max_order) * 2):
ladder = StateGroup(order, np.ones(order // 2))  # integer division: the size must be an int
idx_map = {}
for f_k, l_k in itertools.product(np.arange(2), np.arange(order // 2)):
ladder.state = [f_k, l_k]
idx = ladder.full_idx()
self.assertNotIn(
idx,
idx_map,
"current state: {} conflicts with previous for index {}".format(
[f_k, l_k], idx
)
)
idx_map[idx] = [f_k, l_k]
|
rueberger/MJHMC
|
mjhmc/tests/test_algebraic_samplers.py
|
Python
|
gpl-2.0
| 4,765
|
[
"Gaussian"
] |
717a1e12960bdc11ccc9033864653ca7f8936ed7d933b01433a2cd6218a1e47d
|
"""
This module contain solvers for all kinds of equations:
- algebraic or transcendental, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from __future__ import print_function, division
from sympy.core.compatibility import (iterable, is_sequence, ordered,
default_sort_key, range)
from sympy.core.sympify import sympify
from sympy.core import S, Add, Symbol, Equality, Dummy, Expr, Mul, Pow
from sympy.core.exprtools import factor_terms
from sympy.core.function import (expand_mul, expand_multinomial, expand_log,
Derivative, AppliedUndef, UndefinedFunction, nfloat,
Function, expand_power_exp, Lambda, _mexpand)
from sympy.integrals.integrals import Integral
from sympy.core.numbers import ilcm, Float
from sympy.core.relational import Relational, Ge
from sympy.logic.boolalg import And, Or, BooleanAtom
from sympy.core.basic import preorder_traversal
from sympy.functions import (log, exp, LambertW, cos, sin, tan, acos, asin, atan,
Abs, re, im, arg, sqrt, atan2)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.simplify import (simplify, collect, powsimp, posify, powdenest,
nsimplify, denom, logcombine)
from sympy.simplify.sqrtdenest import sqrt_depth
from sympy.simplify.fu import TR1
from sympy.matrices import Matrix, zeros
from sympy.polys import roots, cancel, factor, Poly, together, degree
from sympy.polys.polyerrors import GeneratorsNeeded, PolynomialError
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.utilities.lambdify import lambdify
from sympy.utilities.misc import filldedent
from sympy.utilities.iterables import uniq, generate_bell, flatten
from mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import reduce_inequalities
from types import GeneratorType
from collections import defaultdict
import warnings
def _ispow(e):
"""Return True if e is a Pow or is exp."""
return isinstance(e, Expr) and (e.is_Pow or e.func is exp)
def _simple_dens(f, symbols):
# when checking if a denominator is zero, we can just check the
# base of powers with nonzero exponents since if the base is zero
# the power will be zero, too. To keep it simple and fast, we
# limit simplification to exponents that are Numbers
dens = set()
for d in denoms(f, symbols):
if d.is_Pow and d.exp.is_Number:
if d.exp.is_zero:
continue # foo**0 is never 0
d = d.base
dens.add(d)
return dens
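# Added illustration of the shortcut above: for 1/x**2 only the base x is
# recorded, since x**2 vanishes exactly when x does.
def _example_simple_dens():
    from sympy.abc import x
    return _simple_dens(1/x**2 + 1/(x + 1), [x])  # -> set([x, x + 1])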
def denoms(eq, symbols=None):
"""Return (recursively) set of all denominators that appear in eq
that contain any symbol in iterable ``symbols``; if ``symbols`` is
None (default) then all denominators will be returned.
Examples
========
>>> from sympy.solvers.solvers import denoms
>>> from sympy.abc import x, y, z
>>> from sympy import sqrt
>>> denoms(x/y)
set([y])
>>> denoms(x/(y*z))
set([y, z])
>>> denoms(3/x + y/z)
set([x, z])
>>> denoms(x/2 + y/z)
set([2, z])
"""
pot = preorder_traversal(eq)
dens = set()
for p in pot:
den = denom(p)
if den is S.One:
continue
for d in Mul.make_args(den):
dens.add(d)
if not symbols:
return dens
rv = []
for d in dens:
free = d.free_symbols
if any(s in free for s in symbols):
rv.append(d)
return set(rv)
def checksol(f, symbol, sol=None, **flags):
"""Checks whether sol is a solution of equation f == 0.
Input can be either a single symbol and corresponding value
or a dictionary of symbols and values. When given as a dictionary
and flag ``simplify=True``, the values in the dictionary will be
simplified. ``f`` can be a single equation or an iterable of equations.
A solution must satisfy all equations in ``f`` to be considered valid;
if a solution does not satisfy any equation, False is returned; if one or
more checks are inconclusive (and none are False) then None
is returned.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers import checksol
>>> x, y = symbols('x,y')
>>> checksol(x**4 - 1, x, 1)
True
>>> checksol(x**4 - 1, x, 0)
False
>>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4})
True
To check if an expression is zero using checksol, pass it
as ``f`` and send an empty dictionary for ``symbol``:
>>> checksol(x**2 + x - x*(x + 1), {})
True
None is returned if checksol() could not conclude.
flags:
'numerical=True (default)'
do a fast numerical check if ``f`` has only one symbol.
'minimal=True (default is False)'
a very fast, minimal testing.
'warn=True (default is False)'
show a warning if checksol() could not conclude.
'simplify=True (default)'
simplify solution before substituting into function and
simplify the function before trying specific simplifications
'force=True (default is False)'
make positive all symbols without assumptions regarding sign.
"""
from sympy.physics.units import Unit
minimal = flags.get('minimal', False)
if sol is not None:
sol = {symbol: sol}
elif isinstance(symbol, dict):
sol = symbol
else:
msg = 'Expecting (sym, val) or ({sym: val}, None) but got (%s, %s)'
raise ValueError(msg % (symbol, sol))
if iterable(f):
if not f:
raise ValueError('no functions to check')
rv = True
for fi in f:
check = checksol(fi, sol, **flags)
if check:
continue
if check is False:
return False
rv = None # don't return, wait to see if there's a False
return rv
if isinstance(f, Poly):
f = f.as_expr()
elif isinstance(f, Equality):
f = f.lhs - f.rhs
if not f:
return True
if sol and not f.has(*list(sol.keys())):
# if f(y) == 0, x=3 does not set f(y) to zero...nor does it not
return None
illegal = set([S.NaN,
S.ComplexInfinity,
S.Infinity,
S.NegativeInfinity])
if any(sympify(v).atoms() & illegal for k, v in sol.items()):
return False
was = f
attempt = -1
numerical = flags.get('numerical', True)
while 1:
attempt += 1
if attempt == 0:
val = f.subs(sol)
if isinstance(val, Mul):
val = val.as_independent(Unit)[0]
if val.atoms() & illegal:
return False
elif attempt == 1:
if val.free_symbols:
if not val.is_constant(*list(sol.keys()), simplify=not minimal):
return False
# there are free symbols -- simple expansion might work
_, val = val.as_content_primitive()
val = expand_mul(expand_multinomial(val))
elif attempt == 2:
if minimal:
return
if flags.get('simplify', True):
for k in sol:
sol[k] = simplify(sol[k])
# start over without the failed expanded form, possibly
# with a simplified solution
val = f.subs(sol)
if flags.get('force', True):
val, reps = posify(val)
# expansion may work now, so try again and check
exval = expand_mul(expand_multinomial(val))
if exval.is_number or not exval.free_symbols:
# we can decide now
val = exval
elif attempt == 3:
val = powsimp(val)
elif attempt == 4:
val = cancel(val)
elif attempt == 5:
val = val.expand()
elif attempt == 6:
val = together(val)
elif attempt == 7:
val = powsimp(val)
else:
# if there are no radicals and no functions then this can't be
# zero anymore -- can it?
pot = preorder_traversal(expand_mul(val))
seen = set()
saw_pow_func = False
for p in pot:
if p in seen:
continue
seen.add(p)
if p.is_Pow and not p.exp.is_Integer:
saw_pow_func = True
elif p.is_Function:
saw_pow_func = True
elif isinstance(p, UndefinedFunction):
saw_pow_func = True
if saw_pow_func:
break
if saw_pow_func is False:
return False
if flags.get('force', True):
# don't do a zero check with the positive assumptions in place
val = val.subs(reps)
nz = val.is_nonzero
if nz is not None:
# issue 5673: nz may be True even when False
# so these are just hacks to keep a false positive
# from being returned
# HACK 1: LambertW (issue 5673)
if val.is_number and val.has(LambertW):
# don't eval this to verify solution since if we got here,
# numerical must be False
return None
# add other HACKs here if necessary, otherwise we assume
# the nz value is correct
return not nz
break
if val == was:
continue
elif val.is_Rational:
return val == 0
if numerical and not val.free_symbols:
return bool(abs(val.n(18).n(12, chop=True)) < 1e-9)
was = val
if flags.get('warn', False):
warnings.warn("\n\tWarning: could not verify solution %s." % sol)
# returns None if it can't conclude
# TODO: improve solution testing
def check_assumptions(expr, **assumptions):
"""Checks whether expression `expr` satisfies all assumptions.
`assumptions` is a dict of assumptions: {'assumption': True|False, ...}.
Examples
========
>>> from sympy import Symbol, pi, I, exp
>>> from sympy.solvers.solvers import check_assumptions
>>> check_assumptions(-5, integer=True)
True
>>> check_assumptions(pi, real=True, integer=False)
True
>>> check_assumptions(pi, real=True, negative=True)
False
>>> check_assumptions(exp(I*pi/7), real=False)
True
>>> x = Symbol('x', real=True, positive=True)
>>> check_assumptions(2*x + 1, real=True, positive=True)
True
>>> check_assumptions(-2*x - 5, real=True, positive=True)
False
`None` is returned if check_assumptions() could not conclude.
>>> check_assumptions(2*x - 1, real=True, positive=True)
>>> z = Symbol('z')
>>> check_assumptions(z, real=True)
"""
expr = sympify(expr)
result = True
for key, expected in assumptions.items():
if expected is None:
continue
test = getattr(expr, 'is_' + key, None)
if test is expected:
continue
elif test is not None:
return False
result = None # Can't conclude, unless an other test fails.
return result
def solve(f, *symbols, **flags):
"""
Algebraically solves equations and systems of equations.
Currently supported are:
- polynomial,
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions.
Input is formed as:
* f
- a single Expr or Poly that must be zero,
- an Equality
- a Relational expression or boolean
- iterable of one or more of the above
* symbols (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
e.g. solve(f, x, y)
- ordered iterable of symbols
e.g. solve(f, [x, y])
* flags
'dict'=True (default is False)
return list (perhaps empty) of solution mappings
'set'=True (default is False)
return list of symbols and set of tuple(s) of solution(s)
'exclude=[] (default)'
don't try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
'check=True (default)'
If False, don't do any testing of solutions. This can be
useful if one wants to include solutions that make any
denominator zero.
'numerical=True (default)'
do a fast numerical check if ``f`` has only one symbol.
'minimal=True (default is False)'
a very fast, minimal testing.
'warn=True (default is False)'
show a warning if checksol() could not conclude.
'simplify=True (default)'
simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero
'force=True (default is False)'
make positive all symbols without assumptions regarding sign.
'rational=True (default)'
recast Floats as Rational; if this option is not used, the
system containing floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
'manual=True (default is False)'
do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually"
'implicit=True (default is False)'
allows solve to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
like cos, exp, ....
'particular=True (default is False)'
instructs solve to try to find a particular solution to a linear
system with as many zeros as possible; this is very expensive
'quick=True (default is False)'
when using particular=True, use a fast heuristic instead to find a
solution with many zeros (instead of using the very slow method
guaranteed to find the largest number of zeros possible)
'cubics=True (default)'
return explicit solutions when cubic expressions are encountered
'quartics=True (default)'
return explicit solutions when quartic expressions are encountered
'quintics=True (default)'
return explicit solutions (if possible) when quintic expressions
are encountered
Examples
========
The output varies according to the input and can be seen by example::
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
* boolean or univariate Relational
>>> solve(x < 3)
And(-oo < x, x < 3)
* to always get a list of solution mappings, use flag dict=True
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> solve([x - 3, y - 1], dict=True)
[{x: 3, y: 1}]
* to get a list of symbols and set of solution(s) use flag set=True
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], set([(-sqrt(3), 1), (sqrt(3), 1)]))
* single expression and single symbol that is in the expression
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], set([(-y,), (y,)]))
>>> solve(x**4 - 1, x, set=True)
([x], set([(-1,), (1,), (-I,), (I,)]))
* single expression with no symbol that is in the expression
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
* single expression with no symbol given
In this case, all free symbols will be selected as potential
symbols to solve for. If the equation is univariate then a list
of solutions is returned; otherwise -- as is the case when symbols are
given as an iterable of length > 1 -- a list of mappings will be returned.
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
* when an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save one from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method.
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], set([(-sqrt(-x),), (sqrt(-x),)]))
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a *symbol* implicitly, use 'implicit=True':
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: (-2*x - 6 + sqrt(3))/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* if you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use dsolve.
* single expression and more than 1 symbol
* when there is a linear solution
>>> solve(x - y**2, x, y)
[{x: y**2}]
>>> solve(x**2 - y, x, y)
[{y: x**2}]
* when undetermined coefficients are identified
* that are linear
>>> solve((a + b)*x - b + 2, a, b)
{a: -2, b: 2}
* that are nonlinear
>>> solve((a + b)*x - b**2 + 2, a, b, set=True)
([a, b], set([(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))]))
* if there is no linear solution then the first successful
attempt for a nonlinear solution will be returned
>>> solve(x**2 - y**2, x, y)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y)
[{x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[{y: -x*sqrt(exp(x))}, {y: x*sqrt(exp(x))}]
* iterable of one or more of the above
* involving relationals or bools
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* when the system is linear
* with a solution
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: -5*y + 2, z: 21*y - 6}
* without a solution
>>> solve([x + 3, x - 3])
[]
* when the system is not linear
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], set([(-2, -2), (0, 2), (2, -2)]))
* if no symbols are given, all free symbols will be selected and a list
of mappings returned
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], set([f(x), x]))
[{x: 2, f(x): -4}]
* if any equation doesn't depend on the symbol(s) given it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest
>>> solve([x - y, y - 3], x)
{x: y}
Notes
=====
assumptions aren't checked when `solve()` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since sin(x)/x has the well known
limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
    In the following case, however, the limit exists and is equal to the
    value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
Disabling high-order, explicit solutions
----------------------------------------
When solving polynomial expressions, one might not want explicit solutions
(which can be quite long). If the expression is univariate, RootOf
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) - (-1/2 -
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3, -(-1/2 +
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 - 1/((-1/2 +
sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)), -(3*sqrt(69)/2 +
27/2)**(1/3)/3 - 1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[RootOf(x**3 - x + 1, 0), RootOf(x**3 - x + 1, 1), RootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
Solving equations involving radicals
------------------------------------
    Because of SymPy's use of the principal root (issue #8789), some solutions
    to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example there is only a single solution to the equation. Other
expressions will yield spurious roots which must be checked manually;
roots which give a negative argument to odd-powered radicals will also need
special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[RootOf(7*_p**5 - 7*_p**3 + 1, 1)**15,
RootOf(7*_p**5 - 7*_p**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so real_root must be used to see that
it satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be necessary
to find the roots, especially for higher order equations. Consider the
following expression:
>>> expr = root(x, 3) - root(x, 5)
    We will construct a known value for this expression at x = -3 by selecting
    the root with index 1 for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The solve function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function unrad, however, can be used to get a form of the equation for
which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although eq or eq1 could have been used to find xvals, the solution can
only be verified with expr1:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
See Also
========
    - rsolve() for solving recurrence relations
- dsolve() for solving differential equations
"""
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
ordered_symbols = (symbols and
symbols[0] and
(isinstance(symbols[0], Symbol) or
is_sequence(symbols[0],
include=GeneratorType)
)
)
f, symbols = (_sympified_list(w) for w in [f, symbols])
implicit = flags.get('implicit', False)
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, Equality):
if 'ImmutableMatrix' in [type(a).__name__ for a in fi.args]:
f[i] = fi.lhs - fi.rhs
else:
f[i] = Add(fi.lhs, -fi.rhs, evaluate=False)
elif isinstance(fi, Poly):
f[i] = fi.as_expr()
elif isinstance(fi, (bool, BooleanAtom)) or fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
# rewrite hyperbolics in terms of exp
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction),
lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# preprocess symbol(s)
###########################################################################
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not (p.is_number or p.is_Add or p.is_Mul) or \
isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
symbols = list(symbols)
# supply dummy symbols so solve(3) behaves like solve(3, x)
for i in range(len(f) - len(symbols)):
symbols.append(Dummy())
ordered_symbols = False
elif len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# real/imag handling -----------------------------
w = Dummy('w')
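    # piece(v) encodes Abs(v) in an invertible, piecewise form:
    # v when v >= 0, and -v otherwise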
piece = Lambda(w, Piecewise((w, Ge(w, 0)), (-w, True)))
for i, fi in enumerate(f):
# Abs
reps = []
for a in fi.atoms(Abs):
if not a.has(*symbols):
continue
if a.args[0].is_real is None:
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % a)
reps.append((a, piece(a.args[0]) if a.args[0].is_real else \
piece(a.args[0]*S.ImaginaryUnit)))
fi = fi.subs(reps)
# arg
_arg = [a for a in fi.atoms(arg) if a.has(*symbols)]
fi = fi.xreplace(dict(list(zip(_arg,
[atan(im(a.args[0])/re(a.args[0])) for a in _arg]))))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in f):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
for i, fi in enumerate(f):
f[i] = fi.xreplace({s: rhs})
f.append(s - rhs)
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
symbols = list(uniq(symbols))
if not ordered_symbols:
# we do this to make the results returned canonical in case f
# contains a system of nonlinear equations; all other cases should
# be unambiguous
symbols = sorted(symbols, key=default_sort_key)
# we can solve for non-symbol entities by replacing them with Dummy symbols
symbols_new = []
symbol_swapped = False
for i, s in enumerate(symbols):
if s.is_Symbol:
s_new = s
else:
symbol_swapped = True
s_new = Dummy('X%d' % i)
symbols_new.append(s_new)
if symbol_swapped:
swap_sym = list(zip(symbols, symbols_new))
f = [fi.subs(swap_sym) for fi in f]
symbols = symbols_new
swap_sym = dict([(v, k) for k, v in swap_sym])
else:
swap_sym = {}
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.has(*symset):
ok = True
else:
free = fi.free_symbols
if not free:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
            elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for d in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
non_inverts = [(v, k.subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any([_has_piecewise(a) for a in e.args])
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return dict([(k, v.subs(non_inverts)) for k, v in
solution.items()])
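        # a single-pass loop is used as a breakable block to dispatch on
        # the shape of the solution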
for i in range(1):
if type(solution) is dict:
solution = _do_dict(solution)
break
elif solution and type(solution) is list:
if type(solution[0]) is dict:
solution = [_do_dict(s) for s in solution]
break
elif type(solution[0]) is tuple:
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if symbol_swapped:
symbols = [swap_sym[k] for k in symbols]
if type(solution) is dict:
solution = dict([(swap_sym[k], v.subs(swap_sym))
for k, v in solution.items()])
elif solution and type(solution) is list and type(solution[0]) is dict:
for i, sol in enumerate(solution):
solution[i] = dict([(swap_sym[k], v.subs(swap_sym))
for k, v in sol.items()])
# undo the dictionary solutions returned when the system was only partially
# solved with poly-system if all symbols are present
if (
not flags.get('dict', False) and
solution and
ordered_symbols and
type(solution) is not dict and
type(solution[0]) is dict and
all(s in solution[0] for s in symbols)
):
solution = [tuple([r[s].subs(r) for s in symbols]) for r in solution]
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if type(solution) is tuple:
# this has already been checked and is in as_set form
return solution
elif type(solution) is list:
if type(solution[0]) is tuple:
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif type(solution[0]) is dict:
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif type(solution) is dict:
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
can't be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if not as_set and isinstance(solution, list):
# Make sure that a list of solutions is ordered in a canonical way.
solution.sort(key=default_sort_key)
if not as_dict and not as_set:
return solution or []
# return a list of mappings or []
if not solution:
solution = []
else:
if isinstance(solution, dict):
solution = [solution]
elif iterable(solution[0]):
solution = [dict(list(zip(symbols, s))) for s in solution]
elif isinstance(solution[0], dict):
pass
else:
if len(symbols) != 1:
raise ValueError("Length should be 1")
solution = [{symbols[0]: s} for s in solution]
if as_dict:
return solution
assert as_set
if not solution:
return [], set()
k = list(ordered(solution[0].keys()))
return k, set([tuple([s[ki] for ki in k]) for s in solution])
def _solve(f, *symbols, **flags):
"""Return a checked solution for f in terms of one or more of the
symbols. A list should be returned except for the case when a linear
undetermined-coefficients equation is encountered (in which case
a dictionary is returned).
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised."""
not_impl_msg = "No algorithms are implemented to solve equation %s"
if len(symbols) != 1:
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
if len(ex) == 1:
ex = ex.pop()
try:
# soln may come back as dict, list of dicts or tuples, or
# tuple of symbol list and set of solution tuples
soln = solve_undetermined_coeffs(f, symbols, ex, **flags)
except NotImplementedError:
pass
if soln:
if flags.get('simplify', True):
if type(soln) is dict:
for k in soln:
soln[k] = simplify(soln[k])
elif type(soln) is list:
if type(soln[0]) is dict:
for d in soln:
for k in d:
d[k] = simplify(d[k])
elif type(soln[0]) is tuple:
soln = [tuple(simplify(i) for i in j) for j in soln]
else:
raise TypeError('unrecognized args in list')
elif type(soln) is tuple:
sym, sols = soln
soln = sym, set([tuple(simplify(i) for i in j) for j in sols])
else:
raise TypeError('unrecognized solution type')
return soln
# find first successful solution
failed = []
got_s = set([])
result = []
for s in symbols:
n, d = solve_linear(f, symbols=[s])
if n.is_Symbol:
# no need to check but we should simplify if desired
if flags.get('simplify', True):
d = simplify(d)
if got_s and any([ss in d.free_symbols for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(n)
result.append({n: d})
elif n and d: # otherwise there was no solution for s
failed.append(s)
if not failed:
return result
for s in failed:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if got_s and any([ss in sol.free_symbols for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
else:
raise NotImplementedError(not_impl_msg % f)
symbol = symbols[0]
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
checkdens = check = flags.pop('check', True)
flags['check'] = False
# build up solutions if f is a Mul
if f.is_Mul:
result = set()
for m in f.args:
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = _simple_dens(f, symbols)
result = [s for s in result if
all(not checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for n, (expr, cond) in enumerate(f.args):
candidates = _solve(expr, *symbols, **flags)
for candidate in candidates:
if candidate in result:
continue
try:
v = (cond == True) or cond.subs(symbol, candidate)
                except Exception:
v = False
if v != False:
# Only include solutions that do not match the condition
# of any previous pieces.
matches_other_piece = False
for other_n, (other_expr, other_cond) in enumerate(f.args):
if other_n == n:
break
if other_cond == False:
continue
try:
if other_cond.subs(symbol, candidate) == True:
matches_other_piece = True
break
                        except Exception:
pass
if not matches_other_piece:
v = v == True or v.doit()
if isinstance(v, Relational):
v = v.canonical
result.add(Piecewise(
(candidate, v),
(S.NaN, True)
))
check = False
else:
# first see if it really depends on symbol and whether there
# is a linear solution
f_num, sol = solve_linear(f, symbols=symbols)
        if symbol not in f_num.free_symbols:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
try:
poly = Poly(f_num)
if poly is None:
raise ValueError('could not convert %s to Poly' % f_num)
except GeneratorsNeeded:
simplified_f = simplify(f_num)
if simplified_f != f_num:
return _solve(simplified_f, symbol, **flags)
raise ValueError('expression appears to be a constant')
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.q
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c is not S.One: # c could be a Float
return b**ee, c.q
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = set(b for b in bases if b.is_Function)
trig = set([_ for _ in funcs if
isinstance(_, TrigonometricFunction)])
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
result = _solve(newf, symbol, **flags)
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
result = list(ordered(sols))
if result is False:
msg = 'multiple generators %s' % gens
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
if isinstance(u, (Pow, exp)):
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs(exp(x),y) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs(exp(x),y) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(
lambda w: w.is_Pow or isinstance(w, exp),
_expand).subs(u, t)
if not ftry.has(symbol):
soln = _solve(ftry, t, **flags)
sols = list()
for sol in soln:
for i in inv:
sols.append(i.subs(t, sol))
result = list(ordered(sols))
elif len(gens) == 1:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
# Also use composite=True with f_num since Poly won't update
# poly as documented in issue 8810.
poly = Poly(f_num, gens[0], composite=True)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
soln = None
deg = poly.degree()
flags['tsolve'] = True
solvers = dict([(k, flags.get(k, True)) for k in
('cubics', 'quartics', 'quintics')])
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
try:
soln = poly.all_roots()
except NotImplementedError:
if not flags.get('incomplete', True):
raise NotImplementedError(
filldedent('''
Neither high-order multivariate polynomials
nor sorting of EX-domain polynomials is supported.
If you want to see any results, pass keyword incomplete=True to
solve; to see numerical values of roots
for univariate expressions, use nroots.
'''))
else:
pass
else:
soln = list(soln.keys())
if soln is not None:
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered(set([i.subs(t, s) for i in iv for s in soln])))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
# try unrad
if flags.pop('_unrad', True):
try:
u = unrad(f_num, symbol)
except (ValueError, NotImplementedError):
u = False
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = set([inv.subs(isym, xi) for xi in _solve(eq, isym, **flags)])
else:
try:
rv = set(_solve(eq, symbol, **flags))
except NotImplementedError:
rv = None
if rv is not None:
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
else:
pass # for coverage
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = _simple_dens(f, symbols)
result = [s for s in result if
all(not checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result
def _solve_system(exprs, symbols, **flags):
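    """Return a solution to the system of expressions ``exprs`` for
    ``symbols``; a helper for ``solve``. Expressions that can be converted
    to Poly are passed to the linear- or polynomial-system solvers; the
    rest (or all of them, if ``manual=True``) are solved one symbol at a
    time and the partial solutions are used to update the running result.
    """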
if not exprs:
return []
polys = []
dens = set()
failed = []
result = False
linear = False
manual = flags.get('manual', False)
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(_simple_dens(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
g = g.as_numer_denom()[0]
if manual:
failed.append(g)
continue
poly = g.as_poly(*symbols, extension=True)
if poly is not None:
polys.append(poly)
else:
failed.append(g)
if not polys:
solved_syms = []
else:
if all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary ({symbols: values}) or None
if flags.pop('particular', False):
result = minsolve_linear_system(matrix, *symbols, **flags)
else:
result = solve_linear_system(matrix, *symbols, **flags)
if failed:
if result:
solved_syms = list(result.keys())
else:
solved_syms = []
else:
linear = True
else:
if len(symbols) > len(polys):
from sympy.utilities.iterables import subsets
free = set().union(*[p.free_symbols for p in polys])
free = list(ordered(free.intersection(symbols)))
got_s = set()
result = []
for syms in subsets(free, len(polys)):
try:
# returns [] or list of tuples of solutions for syms
res = solve_poly_system(polys, *syms)
if res:
for r in res:
skip = False
for r1 in r:
if got_s and any([ss in r1.free_symbols
for ss in got_s]):
# sol depends on previously
# solved symbols: discard it
skip = True
if not skip:
got_s.update(syms)
result.extend([dict(list(zip(syms, r)))])
except NotImplementedError:
pass
if got_s:
solved_syms = list(got_s)
else:
raise NotImplementedError('no valid subset found')
else:
try:
result = solve_poly_system(polys, *symbols)
solved_syms = symbols
except NotImplementedError:
failed.extend([g.as_expr() for g in polys])
solved_syms = []
if result:
                # we don't know here if the symbols provided were given
                # or not, so let solve resolve that. A list of dictionaries
                # will always be returned from here.
#
result = [dict(list(zip(solved_syms, r))) for r in result]
if result:
if type(result) is dict:
result = [result]
else:
result = [{}]
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = (e.free_symbols - solved_syms) & legal
if sort:
rv = list(rv)
rv.sort(key=default_sort_key)
return rv
solved_syms = set(solved_syms) # set of symbols we have solved for
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
u = Dummy() # used in solution checking
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, u, eq2, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
if r:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
try:
soln = _solve(eq2, s, **flags)
except NotImplementedError:
continue
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
                    # solution for s is being added in-place
for sol in soln:
if got_s and any([ss in sol.free_symbols for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs(s, sol)
# and add this new solution
rnew[s] = sol
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit:
raise NotImplementedError('could not solve %s' % eq2)
else:
result = newresult
for b in bad_results:
if b in result:
result.remove(b)
default_simplify = bool(failed) # rely on system-solvers to simplify
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and not linear:
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
result = [r for r in result if r]
if linear and result:
result = result[0]
return result
def solve_linear(lhs, rhs=0, symbols=[], exclude=[]):
r""" Return a tuple derived from f = lhs - rhs that is either:
(numerator, denominator) of ``f``
If this comes back as (0, 1) it means
that ``f`` is independent of the symbols in ``symbols``, e.g::
y*cos(x)**2 + y*sin(x)**2 - y = y*(0) = 0
cos(x)**2 + sin(x)**2 = 1
If it comes back as (0, 0) there is no solution to the equation
amongst the symbols given.
If the numerator is not zero then the function is guaranteed
to be dependent on a symbol in ``symbols``.
or
(symbol, solution) where symbol appears linearly in the numerator of
``f``, is in ``symbols`` (if given) and is not in ``exclude`` (if given).
    No simplification is done to ``f`` other than a ``mul=True`` expansion,
    so the solution will correspond strictly to a unique solution.
Examples
========
>>> from sympy.solvers.solvers import solve_linear
>>> from sympy.abc import x, y, z
These are linear in x and 1/x:
>>> solve_linear(x + y**2)
(x, -y**2)
>>> solve_linear(1/x - y**2)
(x, y**(-2))
    When not linear in x or y, the numerator and denominator are returned:
>>> solve_linear(x**2/y**2 - 3)
(x**2 - 3*y**2, y**2)
If the numerator is a symbol then (0, 0) is returned if the solution for
that symbol would have set any denominator to 0:
>>> solve_linear(1/(1/x - 2))
(0, 0)
>>> 1/(1/x) # to SymPy, this looks like x ...
x
>>> solve_linear(1/(1/x)) # so a solution is given
(x, 0)
    If ``x`` is allowed to cancel in the following expression, then it
    appears to be linear; but this sort of cancellation is not done, so the
    solution will always satisfy the original expression without causing a
    division by zero error:
>>> solve_linear(x**2*(1/x - z**2/x))
(x**2*(-z**2 + 1), x)
You can give a list of what you prefer for x candidates:
>>> solve_linear(x + y + z, symbols=[y])
(y, -x - z)
You can also indicate what variables you don't want to consider:
>>> solve_linear(x + y + z, exclude=[x, z])
(y, -x - z)
If only x was excluded then a solution for y or z might be obtained.
"""
if isinstance(lhs, Equality):
if rhs:
raise ValueError(filldedent('''
If lhs is an Equality, rhs must be 0 but was %s''' % rhs))
rhs = lhs.rhs
lhs = lhs.lhs
dens = None
eq = lhs - rhs
n, d = eq.as_numer_denom()
if not n:
return S.Zero, S.One
free = n.free_symbols
if not symbols:
symbols = free
else:
bad = [s for s in symbols if not s.is_Symbol]
if bad:
if len(bad) == 1:
bad = bad[0]
if len(symbols) == 1:
eg = 'solve(%s, %s)' % (eq, symbols[0])
else:
eg = 'solve(%s, *%s)' % (eq, list(symbols))
raise ValueError(filldedent('''
solve_linear only handles symbols, not %s. To isolate
non-symbols use solve, e.g. >>> %s <<<.
''' % (bad, eg)))
symbols = free.intersection(symbols)
symbols = symbols.difference(exclude)
dfree = d.free_symbols
# derivatives are easy to do but tricky to analyze to see if they are going
# to disallow a linear solution, so for simplicity we just evaluate the
# ones that have the symbols of interest
derivs = defaultdict(list)
for der in n.atoms(Derivative):
csym = der.free_symbols & symbols
for c in csym:
derivs[c].append(der)
if symbols:
all_zero = True
for xi in symbols:
# if there are derivatives in this var, calculate them now
if type(derivs[xi]) is list:
derivs[xi] = dict([(der, der.doit()) for der in derivs[xi]])
nn = n.subs(derivs[xi])
dn = nn.diff(xi)
if dn:
all_zero = False
if dn is S.NaN:
break
if not xi in dn.free_symbols:
vi = -(nn.subs(xi, 0))/dn
if dens is None:
dens = _simple_dens(eq, symbols)
if not any(checksol(di, {xi: vi}, minimal=True) is True
for di in dens):
# simplify any trivial integral
irep = [(i, i.doit()) for i in vi.atoms(Integral) if
i.function.is_number]
# do a slight bit of simplification
vi = expand_mul(vi.subs(irep))
if not d.has(xi) or not (d/xi).has(xi):
return xi, vi
if all_zero:
return S.Zero, S.One
if n.is_Symbol: # there was no valid solution
n = d = S.Zero
return n, d # should we cancel now?
def minsolve_linear_system(system, *symbols, **flags):
r"""
Find a particular solution to a linear system.
In particular, try to find a solution with the minimal possible number
    of non-zero variables. This is a very computationally hard problem.
If ``quick=True``, a heuristic is used. Otherwise a naive algorithm with
exponential complexity is used.
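    A minimal sketch (for the underdetermined system x + y = 1, a
    solution with a single non-zero variable is found; output shown
    for illustration):
    >>> from sympy import Matrix
    >>> from sympy.abc import x, y
    >>> from sympy.solvers.solvers import minsolve_linear_system
    >>> minsolve_linear_system(Matrix([[1, 1, 1]]), x, y)  # doctest: +SKIP
    {y: 1, x: 0}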
"""
quick = flags.get('quick', False)
# Check if there are any non-zero solutions at all
s0 = solve_linear_system(system, *symbols, **flags)
if not s0 or all(v == 0 for v in s0.values()):
return s0
if quick:
# We just solve the system and try to heuristically find a nice
# solution.
s = solve_linear_system(system, *symbols)
def update(determined, solution):
delete = []
for k, v in solution.items():
solution[k] = v.subs(determined)
if not solution[k].free_symbols:
delete.append(k)
determined[k] = solution[k]
for k in delete:
del solution[k]
determined = {}
update(determined, s)
while s:
# NOTE sort by default_sort_key to get deterministic result
k = max((k for k in s.values()),
key=lambda x: (len(x.free_symbols), default_sort_key(x)))
x = max(k.free_symbols, key=default_sort_key)
if len(k.free_symbols) != 1:
determined[x] = S(0)
else:
val = solve(k)[0]
if val == 0 and all(v.subs(x, val) == 0 for v in s.values()):
determined[x] = S(1)
else:
determined[x] = val
update(determined, s)
return determined
else:
# We try to select n variables which we want to be non-zero.
# All others will be assumed zero. We try to solve the modified system.
# If there is a non-trivial solution, just set the free variables to
# one. If we do this for increasing n, trying all combinations of
# variables, we will find an optimal solution.
# We speed up slightly by starting at one less than the number of
# variables the quick method manages.
from itertools import combinations
from sympy.utilities.misc import debug
N = len(symbols)
bestsol = minsolve_linear_system(system, *symbols, quick=True)
n0 = len([x for x in bestsol.values() if x != 0])
for n in range(n0 - 1, 1, -1):
debug('minsolve: %s' % n)
thissol = None
for nonzeros in combinations(list(range(N)), n):
subm = Matrix([system.col(i).T for i in nonzeros] + [system.col(-1).T]).T
s = solve_linear_system(subm, *[symbols[i] for i in nonzeros])
if s and not all(v == 0 for v in s.values()):
subs = [(symbols[v], S(1)) for v in nonzeros]
for k, v in s.items():
s[k] = v.subs(subs)
for sym in symbols:
if sym not in s:
if symbols.index(sym) in nonzeros:
s[sym] = S(1)
else:
s[sym] = S(0)
thissol = s
break
if thissol is None:
break
bestsol = thissol
return bestsol
def solve_linear_system(system, *symbols, **flags):
r"""
Solve system of N linear equations with M variables, which means
both under- and overdetermined systems are supported. The possible
number of solutions is zero, one or infinite. Respectively, this
procedure will return None or a dictionary with solutions. In the
case of underdetermined systems, all arbitrary parameters are skipped.
This may cause a situation in which an empty dictionary is returned.
In that case, all symbols can be assigned arbitrary values.
    Input to this function is an N x (M + 1) matrix, which means it has
to be in augmented form. If you prefer to enter N equations and M
unknowns then use `solve(Neqs, *Msymbols)` instead. Note: a local
copy of the matrix is made by this routine so the matrix that is
passed will not be modified.
The algorithm used here is fraction-free Gaussian elimination,
which results, after elimination, in an upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
>>> from sympy import Matrix, solve_linear_system
>>> from sympy.abc import x, y
Solve the following system::
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
A degenerate system returns an empty dictionary.
>>> system = Matrix(( (0,0,0), (0,0,0) ))
>>> solve_linear_system(system, x, y)
{}
"""
do_simplify = flags.get('simplify', True)
if system.rows == system.cols - 1 == len(symbols):
try:
# well behaved n-equations and n-unknowns
inv = inv_quick(system[:, :-1])
rv = dict(zip(symbols, inv*system[:, -1]))
if do_simplify:
for k, v in rv.items():
rv[k] = simplify(v)
if not all(i.is_zero for i in rv.values()):
# non-trivial solution
return rv
except ValueError:
pass
matrix = system[:, :]
syms = list(symbols)
i, m = 0, matrix.cols - 1 # don't count augmentation
while i < matrix.rows:
if i == m:
# an overdetermined system
if any(matrix[i:, m]):
return None # no solutions
else:
# remove trailing rows
matrix = matrix[:i, :]
break
if not matrix[i, i]:
# there is no pivot in current column
# so try to find one in other columns
for k in range(i + 1, m):
if matrix[i, k]:
break
else:
if matrix[i, m]:
# We need to know if this is always zero or not. We
# assume that if there are free symbols that it is not
# identically zero (or that there is more than one way
# to make this zero). Otherwise, if there are none, this
# is a constant and we assume that it does not simplify
# to zero XXX are there better (fast) ways to test this?
# The .equals(0) method could be used but that can be
# slow; numerical testing is prone to errors of scaling.
if not matrix[i, m].free_symbols:
return None # no solution
# A row of zeros with a non-zero rhs can only be accepted
# if there is another equivalent row. Any such rows will
# be deleted.
nrows = matrix.rows
rowi = matrix.row(i)
ip = None
j = i + 1
while j < matrix.rows:
# do we need to see if the rhs of j
# is a constant multiple of i's rhs?
rowj = matrix.row(j)
if rowj == rowi:
matrix.row_del(j)
elif rowj[:-1] == rowi[:-1]:
if ip is None:
_, ip = rowi[-1].as_content_primitive()
_, jp = rowj[-1].as_content_primitive()
if not (simplify(jp - ip) or simplify(jp + ip)):
matrix.row_del(j)
j += 1
if nrows == matrix.rows:
# no solution
return None
# zero row or was a linear combination of
# other rows or was a row with a symbolic
# expression that matched other rows, e.g. [0, 0, x - y]
# so now we can safely skip it
matrix.row_del(i)
if not matrix:
# every choice of variable values is a solution
# so we return an empty dict instead of None
return dict()
continue
        # we want to change the order of columns so
        # the order of variables must also change
syms[i], syms[k] = syms[k], syms[i]
matrix.col_swap(i, k)
pivot_inv = S.One/matrix[i, i]
# divide all elements in the current row by the pivot
matrix.row_op(i, lambda x, _: x * pivot_inv)
for k in range(i + 1, matrix.rows):
if matrix[k, i]:
coeff = matrix[k, i]
# subtract from the current row the row containing
# pivot and multiplied by extracted coefficient
matrix.row_op(k, lambda x, j: simplify(x - matrix[i, j]*coeff))
i += 1
# if there weren't any problems, augmented matrix is now
# in row-echelon form so we can check how many solutions
# there are and extract them using back substitution
if len(syms) == matrix.rows:
# this system is Cramer equivalent so there is
# exactly one solution to this system of equations
k, solutions = i - 1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in range(k + 1, m):
content -= matrix[k, j]*solutions[syms[j]]
if do_simplify:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
elif len(syms) > matrix.rows:
# this system will have infinite number of solutions
# dependent on exactly len(syms) - i parameters
k, solutions = i - 1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in range(k + 1, i):
content -= matrix[k, j]*solutions[syms[j]]
# run back-substitution for parameters
for j in range(i, m):
content -= matrix[k, j]*syms[j]
if do_simplify:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
else:
return [] # no solutions
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
"""Solve equation of a type p(x; a_1, ..., a_k) == q(x) where both
p, q are univariate polynomials and f depends on k parameters.
The result of this functions is a dictionary with symbolic
values of those parameters with respect to coefficients in q.
This functions accepts both Equations class instances and ordinary
SymPy expressions. Specification of parameters and variable is
obligatory for efficiency and simplicity reason.
>>> from sympy import Eq
>>> from sympy.abc import a, b, c, x
>>> from sympy.solvers import solve_undetermined_coeffs
>>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
{a: 1/c, b: -1/c}
"""
if isinstance(equ, Equality):
# got equation, so move all the
# terms to the left hand side
equ = equ.lhs - equ.rhs
equ = cancel(equ).as_numer_denom()[0]
system = list(collect(equ.expand(), sym, evaluate=False).values())
if not any(equ.has(sym) for equ in system):
# consecutive powers in the input expressions have
# been successfully collected, so solve remaining
# system using Gaussian elimination algorithm
return solve(system, *coeffs, **flags)
else:
return None # no solutions
def solve_linear_system_LU(matrix, syms):
"""
Solves the augmented matrix system using LUsolve and returns a dictionary
in which solutions are keyed to the symbols of syms *as ordered*.
The matrix must be invertible.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.solvers import solve_linear_system_LU
>>> solve_linear_system_LU(Matrix([
... [1, 2, 0, 1],
... [3, 2, 2, 1],
... [2, 0, 0, 1]]), [x, y, z])
{x: 1/2, y: 1/4, z: -1/2}
See Also
========
sympy.matrices.LUsolve
"""
if matrix.rows != matrix.cols - 1:
raise ValueError("Rows should be equal to columns - 1")
A = matrix[:matrix.rows, :matrix.rows]
b = matrix[:, matrix.cols - 1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i, 0]
return solutions
def det_perm(M):
"""Return the det(``M``) by using permutations to select factors.
    For sizes larger than 8 the number of permutations becomes
    prohibitively large; in that case, or if there are no symbols in the
    matrix, it is better to use the standard determinant routines, e.g.
    ``M.det()``.
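    Examples
    ========
    A small, fully symbolic matrix (for illustration):
    >>> from sympy import Matrix
    >>> from sympy.abc import a, b, c, d
    >>> from sympy.solvers.solvers import det_perm
    >>> det_perm(Matrix([[a, b], [c, d]]))
    a*d - b*c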
See Also
========
det_minor
det_quick
"""
args = []
s = True
n = M.rows
    try:
        mat = M._mat
    except AttributeError:
        mat = flatten(M.tolist())
    for perm in generate_bell(n):
        fac = []
        idx = 0
        for j in perm:
            fac.append(mat[idx + j])
idx += n
term = Mul(*fac) # disaster with unevaluated Mul -- takes forever for n=7
args.append(term if s else -term)
s = not s
return Add(*args)
def det_minor(M):
"""Return the ``det(M)`` computed from minors without
introducing new nesting in products.
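    Examples
    ========
    A minimal 2x2 illustration:
    >>> from sympy import Matrix
    >>> from sympy.abc import a, b, c, d
    >>> from sympy.solvers.solvers import det_minor
    >>> det_minor(Matrix([[a, b], [c, d]]))
    a*d - b*c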
See Also
========
det_perm
det_quick
"""
n = M.rows
if n == 2:
return M[0, 0]*M[1, 1] - M[1, 0]*M[0, 1]
else:
return sum([(1, -1)[i % 2]*Add(*[M[0, i]*d for d in
Add.make_args(det_minor(M.minorMatrix(0, i)))])
if M[0, i] else S.Zero for i in range(n)])
def det_quick(M, method=None):
"""Return ``det(M)`` assuming that either
there are lots of zeros or the size of the matrix
is small. If this assumption is not met, then the normal
Matrix.det function will be used with method = ``method``.
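    Examples
    ========
    A sketch of the dispatch: a fully symbolic matrix is handled by
    ``det_perm``:
    >>> from sympy import Matrix
    >>> from sympy.abc import a, b, c, d
    >>> from sympy.solvers.solvers import det_quick
    >>> det_quick(Matrix([[a, b], [c, d]]))
    a*d - b*c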
See Also
========
det_minor
det_perm
"""
if any(i.has(Symbol) for i in M):
if M.rows < 8 and all(i.has(Symbol) for i in M):
return det_perm(M)
return det_minor(M)
else:
return M.det(method=method) if method else M.det()
def inv_quick(M):
"""Return the inverse of ``M``, assuming that either
there are lots of zeros or the size of the matrix
is small.
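    Examples
    ========
    A minimal sketch: a matrix with no symbols simply defers to
    ``Matrix.inv``:
    >>> from sympy import Matrix
    >>> from sympy.solvers.solvers import inv_quick
    >>> inv_quick(Matrix([[2, 0], [0, 4]]))
    Matrix([
    [1/2,   0],
    [  0, 1/4]])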
"""
from sympy.matrices import zeros
if any(i.has(Symbol) for i in M):
if all(i.has(Symbol) for i in M):
det = lambda _: det_perm(_)
else:
det = lambda _: det_minor(_)
else:
return M.inv()
n = M.rows
d = det(M)
if d is S.Zero:
raise ValueError("Matrix det == 0; not invertible.")
ret = zeros(n)
s1 = -1
for i in range(n):
s = s1 = -s1
for j in range(n):
di = det(M.minorMatrix(i, j))
ret[j, i] = s*di/d
s = -s
return ret
# these are functions that have multiple inverse values per period
multi_inverses = {
sin: lambda x: (asin(x), S.Pi - asin(x)),
cos: lambda x: (acos(x), 2*S.Pi - acos(x)),
}
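# e.g. sin(x) = 1/2 is inverted to both asin(1/2) and pi - asin(1/2),
# so solve(sin(x) - S(1)/2) returns both pi/6 and 5*pi/6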
def _tsolve(eq, sym, **flags):
"""
Helper for _solve that solves a transcendental equation with respect
    to the given symbol. Various equations containing powers and
    logarithms can be solved.
There is currently no guarantee that all solutions will be returned or
that a real solution will be favored over a complex one.
Either a list of potential solutions will be returned or None will be
returned (in the case that no method was known to get a solution
for the equation). All other errors (like the inability to cast an
expression as a Poly) are unhandled.
Examples
========
>>> from sympy import log
>>> from sympy.solvers.solvers import _tsolve as tsolve
>>> from sympy.abc import x
>>> tsolve(3**(2*x + 5) - 4, x)
[-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if 'tsolve_saw' not in flags:
flags['tsolve_saw'] = []
if eq in flags['tsolve_saw']:
return None
else:
flags['tsolve_saw'].append(eq)
rhs, lhs = _invert(eq, sym)
if lhs == sym:
return [rhs]
try:
if lhs.is_Add:
# it's time to try factoring; powdenest is used
            # to try to get powers in standard form for better factoring
f = factor(powdenest(lhs - rhs))
if f.is_Mul:
return _solve(f, sym, **flags)
if rhs:
f = logcombine(lhs, force=flags.get('force', True))
if f.count(log) != lhs.count(log):
if f.func is log:
return _solve(f.args[0] - exp(rhs), sym, **flags)
return _tsolve(f - rhs, sym)
elif lhs.is_Pow:
if lhs.exp.is_Integer:
if lhs - rhs != eq:
return _solve(lhs - rhs, sym, **flags)
elif sym not in lhs.exp.free_symbols:
return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags)
elif not rhs and sym in lhs.exp.free_symbols:
# f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at
# the same place
sol_base = _solve(lhs.base, sym, **flags)
if not sol_base:
return sol_base # no solutions to remove so return now
return list(ordered(set(sol_base) - set(
_solve(lhs.exp, sym, **flags))))
elif (rhs is not S.Zero and
lhs.base.is_positive and
lhs.exp.is_real):
return _solve(lhs.exp*log(lhs.base) - log(rhs), sym, **flags)
elif lhs.base == 0 and rhs == 1:
return _solve(lhs.exp, sym, **flags)
elif lhs.is_Mul and rhs.is_positive:
llhs = expand_log(log(lhs))
if llhs.is_Add:
return _solve(llhs - log(rhs), sym, **flags)
elif lhs.is_Function and len(lhs.args) == 1 and lhs.func in multi_inverses:
# sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3))
soln = []
for i in multi_inverses[lhs.func](rhs):
soln.extend(_solve(lhs.args[0] - i, sym, **flags))
return list(ordered(soln))
rewrite = lhs.rewrite(exp)
if rewrite != lhs:
return _solve(rewrite - rhs, sym, **flags)
except NotImplementedError:
pass
    # maybe it is a Lambert pattern
if flags.pop('bivariate', True):
# lambert forms may need some help being recognized, e.g. changing
# 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1
# to 2**(3*x) + (x*log(2) + 1)**3
g = _filtered_gens(eq.as_poly(), sym)
up_or_log = set()
for gi in g:
if gi.func is exp or gi.func is log:
up_or_log.add(gi)
elif gi.is_Pow:
gisimp = powdenest(expand_power_exp(gi))
if gisimp.is_Pow and sym in gisimp.exp.free_symbols:
up_or_log.add(gi)
down = g.difference(up_or_log)
eq_down = expand_log(expand_power_exp(eq)).subs(
dict(list(zip(up_or_log, [0]*len(up_or_log)))))
eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down))
rhs, lhs = _invert(eq, sym)
if lhs.has(sym):
try:
poly = lhs.as_poly()
g = _filtered_gens(poly, sym)
return _solve_lambert(lhs - rhs, sym, g)
except NotImplementedError:
# maybe it's a convoluted function
if len(g) == 2:
try:
gpu = bivariate_type(lhs - rhs, *g)
if gpu is None:
raise NotImplementedError
g, p, u = gpu
flags['bivariate'] = False
inversion = _tsolve(g - u, sym, **flags)
if inversion:
sol = _solve(p, u, **flags)
return list(ordered(set([i.subs(u, s)
for i in inversion for s in sol])))
except NotImplementedError:
pass
else:
pass
if flags.pop('force', True):
flags['force'] = False
pos, reps = posify(lhs - rhs)
for u, s in reps.items():
if s == sym:
break
else:
u = sym
if pos.has(u):
try:
soln = _solve(pos, u, **flags)
return list(ordered([s.subs(reps) for s in soln]))
except NotImplementedError:
pass
else:
pass # here for coverage
return # here for coverage
# TODO: option for calculating J numerically
def nsolve(*args, **kwargs):
r"""
Solve a nonlinear equation system numerically::
nsolve(f, [args,] x0, modules=['mpmath'], **kwargs)
f is a vector function of symbolic expressions representing the system.
args are the variables. If there is only one variable, this argument can
be omitted.
x0 is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to
evaluate the function and the Jacobian matrix. Make sure to use a module
that supports matrices. For more information on the syntax, please see the
docstring of lambdify.
Overdetermined systems are supported.
>>> from sympy import Symbol, nsolve
>>> import sympy
>>> import mpmath
>>> mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print(nsolve((f1, f2), (x1, x2), (-1, 1)))
[-1.19287309935246]
[ 1.27844411169911]
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
    mpmath.findroot is used; see its documentation for more extensive
    information, especially concerning keyword parameters and available
    solvers. Note,
however, that this routine works only with the numerator of the function
in the one-dimensional case, and for very steep functions near the root
this may lead to a failure in the verification of the root. In this case
you should use the flag `verify=False` and independently verify the
solution.
>>> from sympy import cos, cosh
>>> from sympy.abc import i
>>> f = cos(x)*cosh(x) - 1
>>> nsolve(f, 3.14*100)
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1.39267e+230 > 2.1684e-19)
>>> ans = nsolve(f, 3.14*100, verify=False); ans
312.588469032184
>>> f.subs(x, ans).n(2)
2.1e+121
>>> (f/f.diff(x)).subs(x, ans).n(2)
7.4e-15
One might safely skip the verification if bounds of the root are known
and a bisection method is used:
>>> bounds = lambda i: (3.14*i, 3.14*(i + 1))
>>> nsolve(f, bounds(100), solver='bisect', verify=False)
315.730061685774
"""
# there are several other SymPy functions that use method= so
# guard against that here
if 'method' in kwargs:
raise ValueError(filldedent('''
Keyword "method" should not be used in this context. When using
some mpmath solvers directly, the keyword "method" is
used, but when using nsolve (and findroot) the keyword to use is
"solver".'''))
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if iterable(f):
f = list(f)
for i, fi in enumerate(f):
if isinstance(fi, Equality):
f[i] = fi.lhs - fi.rhs
f = Matrix(f).T
if not isinstance(f, Matrix):
# assume it's a sympy expression
if isinstance(f, Equality):
f = f.lhs - f.rhs
f = f.evalf()
syms = f.free_symbols
if fargs is None:
fargs = syms.copy().pop()
if not (len(syms) == 1 and (fargs in syms or fargs[0] in syms)):
raise ValueError(filldedent('''
expected a one-dimensional and numerical function'''))
# the function is much better behaved if there is no denominator
f = f.as_numer_denom()[0]
f = lambdify(fargs, f, modules)
return findroot(f, x0, **kwargs)
if len(fargs) > f.cols:
raise NotImplementedError(filldedent('''
need at least as many equations as variables'''))
verbose = kwargs.get('verbose', False)
if verbose:
print('f(x):')
print(f)
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print('J(x):')
print(J)
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
return x
def _invert(eq, *symbols, **kwargs):
"""Return tuple (i, d) where ``i`` is independent of ``symbols`` and ``d``
contains symbols. ``i`` and ``d`` are obtained after recursively using
algebraic inversion until an uninvertible ``d`` remains. If there are no
free symbols then ``d`` will be zero. Some (but not necessarily all)
solutions to the expression ``i - d`` will be related to the solutions of
the original expression.
Examples
========
>>> from sympy.solvers.solvers import _invert as invert
>>> from sympy import sqrt, cos
>>> from sympy.abc import x, y
>>> invert(x - 3)
(3, x)
>>> invert(3)
(3, 0)
>>> invert(2*cos(x) - 1)
(1/2, cos(x))
>>> invert(sqrt(x) - 3)
(3, sqrt(x))
>>> invert(sqrt(x) + y, x)
(-y, sqrt(x))
>>> invert(sqrt(x) + y, y)
(-sqrt(x), y)
>>> invert(sqrt(x) + y, x, y)
(0, sqrt(x) + y)
If there is more than one symbol in a power's base and the exponent
is not an Integer, then the principal root will be used for the
inversion:
>>> invert(sqrt(x + y) - 2)
(4, x + y)
If the exponent is an integer, setting ``integer_power`` to True
will force the principal root to be selected:
>>> invert(x**2 - 4, integer_power=True)
(2, x)
"""
eq = sympify(eq)
free = eq.free_symbols
if not symbols:
symbols = free
if not free & set(symbols):
return eq, S.Zero
dointpow = bool(kwargs.get('integer_power', False))
lhs = eq
rhs = S.Zero
while True:
was = lhs
while True:
indep, dep = lhs.as_independent(*symbols)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep is S.Zero:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
rhs /= indep
# collect like-terms in symbols
if lhs.is_Add:
terms = {}
for a in lhs.args:
i, d = a.as_independent(*symbols)
terms.setdefault(d, []).append(i)
if any(len(v) > 1 for v in terms.values()):
args = []
for d, i in terms.items():
if len(i) > 1:
args.append(Add(*i)*d)
else:
args.append(i[0]*d)
lhs = Add(*args)
# if it's a two-term Add with rhs = 0 and two powers we can get the
# dependent terms together, e.g. 3*f(x) + 2*g(x) -> f(x)/g(x) = -2/3
if lhs.is_Add and not rhs and len(lhs.args) == 2 and \
not lhs.is_polynomial(*symbols):
a, b = ordered(lhs.args)
ai, ad = a.as_independent(*symbols)
bi, bd = b.as_independent(*symbols)
if any(_ispow(i) for i in (ad, bd)):
a_base, a_exp = ad.as_base_exp()
b_base, b_exp = bd.as_base_exp()
if a_base == b_base:
# a = -b
lhs = powsimp(powdenest(ad/bd))
rhs = -bi/ai
else:
rat = ad/bd
_lhs = powsimp(ad/bd)
if _lhs != rat:
lhs = _lhs
rhs = -bi/ai
if ai*bi is S.NegativeOne:
if all(
isinstance(i, Function) for i in (ad, bd)) and \
ad.func == bd.func and len(ad.args) == len(bd.args):
if len(ad.args) == 1:
lhs = ad.args[0] - bd.args[0]
else:
# should be able to solve
# f(x, y) == f(2, 3) -> x == 2
# f(x, x + y) == f(2, 3) -> x == 2 or x == 3 - y
raise NotImplementedError('equal function with more than 1 argument')
elif lhs.is_Mul and any(_ispow(a) for a in lhs.args):
lhs = powsimp(powdenest(lhs))
if lhs.is_Function:
if hasattr(lhs, 'inverse') and len(lhs.args) == 1:
# -1
# f(x) = g -> x = f (g)
#
# /!\ inverse should not be defined if there are multiple values
# for the function -- these are handled in _tsolve
#
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
elif lhs.func is atan2:
y, x = lhs.args
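                # tangent half-angle identity: atan2(y, x) == 2*atan(y/(sqrt(x**2 + y**2) + x)),
                # valid away from the branch cut along the negative real axis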
lhs = 2*atan(y/(sqrt(x**2 + y**2) + x))
if rhs and lhs.is_Pow and lhs.exp.is_Integer and lhs.exp < 0:
lhs = 1/lhs
rhs = 1/rhs
# base**a = b -> base = b**(1/a) if
# a is an Integer and dointpow=True (this gives real branch of root)
# a is not an Integer and the equation is multivariate and the
# base has more than 1 symbol in it
# The rationale for this is that right now the multi-system solvers
# doesn't try to resolve generators to see, for example, if the whole
# system is written in terms of sqrt(x + y) so it will just fail, so we
# do that step here.
if lhs.is_Pow and (
lhs.exp.is_Integer and dointpow or not lhs.exp.is_Integer and
len(symbols) > 1 and len(lhs.base.free_symbols & set(symbols)) > 1):
rhs = rhs**(1/lhs.exp)
lhs = lhs.base
if lhs == was:
break
return rhs, lhs
def unrad(eq, *syms, **flags):
""" Remove radicals with symbolic arguments and return (eq, cov),
None or raise an error:
None is returned if there are no radicals to remove.
NotImplementedError is raised if there are radicals and they cannot be
removed or if the relationship between the original symbols and the
change of variable needed to rewrite the system as a polynomial cannot
be solved.
Otherwise the tuple, ``(eq, cov)``, is returned where::
``eq``, ``cov``
``eq`` is an equation without radicals (in the symbol(s) of
interest) whose solutions are a superset of the solutions to the
original expression. ``eq`` might be re-written in terms of a new
variable; the relationship to the original variables is given by
``cov`` which is a list containing ``v`` and ``v**p - b`` where
``p`` is the power needed to clear the radical and ``b`` is the
radical now expressed as a polynomial in the symbols of interest.
For example, for sqrt(2 - x) the tuple would be
``(c, c**2 - 2 + x)``. The solutions of ``eq`` will contain
solutions to the original equation (if there are any).
``syms``
an iterable of symbols which, if provided, will limit the focus of
radical removal: only radicals with one or more of the symbols of
interest will be cleared. All free symbols are used if ``syms`` is not
set.
``flags`` are used internally for communication during recursive calls.
    The following option is also recognized::
``take``, when defined, is interpreted as a single-argument function
that returns True if a given Pow should be handled.
Radicals can be removed from an expression if::
* all bases of the radicals are the same; a change of variables is
done in this case.
        * all radicals appear in one term of the expression
        * the expression consists of exactly four terms, all having sqrt()
          factors, or there are fewer than four terms having sqrt() factors
* there are only two terms with radicals
Examples
========
>>> from sympy.solvers.solvers import unrad
>>> from sympy.abc import x
>>> from sympy import sqrt, Rational, root, real_roots, solve
>>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
(x**5 - 64, [])
>>> unrad(sqrt(x) + root(x + 1, 3))
(x**3 - x**2 - 2*x - 1, [])
>>> eq = sqrt(x) + root(x, 3) - 2
>>> unrad(eq)
(_p**3 + _p**2 - 2, [_p, _p**6 - x])
"""
_inv_error = 'cannot get an analytical solution for the inversion'
uflags = dict(check=False, simplify=False)
def _cov(p, e):
if cov:
# XXX - uncovered
oldp, olde = cov
if Poly(e, p).degree(p) in (1, 2):
cov[:] = [p, olde.subs(oldp, _solve(e, p, **uflags)[0])]
else:
raise NotImplementedError
else:
cov[:] = [p, e]
def _canonical(eq, cov):
if cov:
# change symbol to vanilla so no solutions are eliminated
p, e = cov
rep = {p: Dummy(p.name)}
eq = eq.xreplace(rep)
cov = [p.xreplace(rep), e.xreplace(rep)]
# remove constants and powers of factors since these don't change
# the location of the root; XXX should factor or factor_terms be used?
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True))
if eq.is_Mul:
args = []
for f in eq.args:
if f.is_number:
continue
if f.is_Pow and _take(f, True):
args.append(f.base)
else:
args.append(f)
eq = Mul(*args) # leave as Mul for more efficient solving
# make the sign canonical
free = eq.free_symbols
if len(free) == 1:
if eq.coeff(free.pop()**degree(eq)).could_extract_minus_sign():
eq = -eq
elif eq.could_extract_minus_sign():
eq = -eq
return eq, cov
def _Q(pow):
# return leading Rational of denominator of Pow's exponent
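        # e.g. _Q(x**Rational(3, 2)) == 2 while _Q(x**2) == 1 (integer exponent)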
c = pow.as_base_exp()[1].as_coeff_Mul()[0]
if not c.is_Rational:
return S.One
return c.q
# define the _take method that will determine whether a term is of interest
def _take(d, take_int_pow):
# return True if coefficient of any factor's exponent's den is not 1
for pow in Mul.make_args(d):
if not (pow.is_Symbol or pow.is_Pow):
continue
b, e = pow.as_base_exp()
if not b.has(*syms):
continue
if not take_int_pow and _Q(pow) == 1:
continue
free = pow.free_symbols
if free.intersection(syms):
return True
return False
_take = flags.setdefault('_take', _take)
cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
sorted(dict(cov=[], n=None, rpt=0).items())]
# preconditioning
eq = powdenest(factor_terms(eq, radical=True))
eq, d = eq.as_numer_denom()
eq = _mexpand(eq, recursive=True)
if eq.is_number:
return
syms = set(syms) or eq.free_symbols
poly = eq.as_poly()
gens = [g for g in poly.gens if _take(g, True)]
if not gens:
return
# check for trivial case
# - already a polynomial in integer powers
if all(_Q(g) == 1 for g in gens):
return
# - an exponent has a symbol of interest (don't handle)
if any(g.as_base_exp()[1].has(*syms) for g in gens):
return
def _rads_bases_lcm(poly):
# if all the bases are the same or all the radicals are in one
# term, `lcm` will be the lcm of the denominators of the
# exponents of the radicals
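        # e.g. for generators sqrt(x) and x**Rational(1, 3): rads holds both,
        # bases == {x} and lcm == ilcm(2, 3) == 6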
lcm = 1
rads = set()
bases = set()
for g in poly.gens:
if not _take(g, False):
continue
q = _Q(g)
if q != 1:
rads.add(g)
lcm = ilcm(lcm, q)
bases.add(g.base)
return rads, bases, lcm
rads, bases, lcm = _rads_bases_lcm(poly)
if not rads:
return
covsym = Dummy('p', nonnegative=True)
# only keep in syms symbols that actually appear in radicals;
# and update gens
newsyms = set()
for r in rads:
newsyms.update(syms & r.free_symbols)
if newsyms != syms:
syms = newsyms
gens = [g for g in gens if g.free_symbols & syms]
# get terms together that have common generators
drad = dict(list(zip(rads, list(range(len(rads))))))
rterms = {(): []}
args = Add.make_args(poly.as_expr())
for t in args:
if _take(t, False):
common = set(t.as_poly().gens).intersection(rads)
key = tuple(sorted([drad[i] for i in common]))
else:
key = ()
rterms.setdefault(key, []).append(t)
others = Add(*rterms.pop(()))
rterms = [Add(*rterms[k]) for k in rterms.keys()]
# the output will depend on the order terms are processed, so
# make it canonical quickly
rterms = list(reversed(list(ordered(rterms))))
ok = False # we don't have a solution yet
depth = sqrt_depth(eq)
if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
eq = rterms[0]**lcm - ((-others)**lcm)
ok = True
else:
if len(rterms) == 1 and rterms[0].is_Add:
rterms = list(rterms[0].args)
if len(bases) == 1:
b = bases.pop()
if len(syms) > 1:
free = b.free_symbols
x = set([g for g in gens if g.is_Symbol]) & free
if not x:
x = free
x = ordered(x)
else:
x = syms
x = list(x)[0]
try:
inv = _solve(covsym**lcm - b, x, **uflags)
if not inv:
raise NotImplementedError
eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
_cov(covsym, covsym**lcm - b)
return _canonical(eq, cov)
except NotImplementedError:
pass
else:
# no longer consider integer powers as generators
gens = [g for g in gens if _Q(g) != 1]
if len(rterms) == 2:
if not others:
eq = rterms[0]**lcm - (-rterms[1])**lcm
ok = True
elif not log(lcm, 2).is_Integer:
# the lcm-is-power-of-two case is handled below
r0, r1 = rterms
if flags.get('_reverse', False):
r1, r0 = r0, r1
i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
for reverse in range(2):
if reverse:
i0, i1 = i1, i0
r0, r1 = r1, r0
_rads1, _, lcm1 = i1
_rads1 = Mul(*_rads1)
t1 = _rads1**lcm1
c = covsym**lcm1 - t1
for x in syms:
try:
sol = _solve(c, x, **uflags)
if not sol:
raise NotImplementedError
neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
others
tmp = unrad(neweq, covsym)
if tmp:
eq, newcov = tmp
if newcov:
newp, newc = newcov
_cov(newp, c.subs(covsym,
_solve(newc, covsym, **uflags)[0]))
else:
_cov(covsym, c)
else:
eq = neweq
_cov(covsym, c)
ok = True
break
except NotImplementedError:
if reverse:
raise NotImplementedError(
'no successful change of variable found')
else:
pass
if ok:
break
elif len(rterms) == 3:
# two cube roots and another with order less than 5
# (so an analytical solution can be found) or a base
# that matches one of the cube root bases
info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
RAD = 0
BASES = 1
LCM = 2
if info[0][LCM] != 3:
info.append(info.pop(0))
rterms.append(rterms.pop(0))
elif info[1][LCM] != 3:
info.append(info.pop(1))
rterms.append(rterms.pop(1))
if info[0][LCM] == info[1][LCM] == 3:
if info[1][BASES] != info[2][BASES]:
info[0], info[1] = info[1], info[0]
rterms[0], rterms[1] = rterms[1], rterms[0]
if info[1][BASES] == info[2][BASES]:
eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
ok = True
elif info[2][LCM] < 5:
# a*root(A, 3) + b*root(B, 3) + others = c
a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
# zz represents the unraded expression into which the
# specifics for this case are substituted
zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
9*c*d**8 + d**9)
def _t(i):
b = Mul(*info[i][RAD])
return cancel(rterms[i]/b), Mul(*info[i][BASES])
aa, AA = _t(0)
bb, BB = _t(1)
cc = -rterms[2]
dd = others
eq = zz.xreplace(dict(zip(
(a, A, b, B, c, d),
(aa, AA, bb, BB, cc, dd))))
ok = True
# handle power-of-2 cases
if not ok:
if log(lcm, 2).is_Integer and (not others and
len(rterms) == 4 or len(rterms) < 4):
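            # helper: the expanded form of (a + b)**2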
def _norm2(a, b):
return a**2 + b**2 + 2*a*b
if len(rterms) == 4:
# (r0+r1)**2 - (r2+r3)**2
r0, r1, r2, r3 = rterms
eq = _norm2(r0, r1) - _norm2(r2, r3)
ok = True
elif len(rterms) == 3:
# (r1+r2)**2 - (r0+others)**2
r0, r1, r2 = rterms
eq = _norm2(r1, r2) - _norm2(r0, others)
ok = True
elif len(rterms) == 2:
# r0**2 - (r1+others)**2
r0, r1 = rterms
eq = r0**2 - _norm2(r1, others)
ok = True
new_depth = sqrt_depth(eq) if ok else depth
rpt += 1 # XXX how many repeats with others unchanging is enough?
if not ok or (
nwas is not None and len(rterms) == nwas and
new_depth is not None and new_depth == depth and
rpt > 3):
raise NotImplementedError('Cannot remove all radicals')
flags.update(dict(cov=cov, n=len(rterms), rpt=rpt))
neq = unrad(eq, *syms, **flags)
if neq:
eq, cov = neq
eq, cov = _canonical(eq, cov)
return eq, cov
from sympy.solvers.bivariate import (
bivariate_type, _solve_lambert, _filtered_gens)
|
sahilshekhawat/sympy
|
sympy/solvers/solvers.py
|
Python
|
bsd-3-clause
| 120,569
|
[
"Gaussian"
] |
5b6a2a982acd6e2258d22ac88b4acb69cd238e6dfe600a960e62a6daed27a86a
|
import os
try:
HOME = os.environ['HOME']
except KeyError:
raise Exception("Please add an HOME environment variable corresponding\
to the location of the user's home directory.")
try:
NC_HOME = os.environ['NC_HOME']
except KeyError:
raise Exception("Please add an NC_HOME environment variable corresponding\
to the location of the neuroConstruct directory.")
try:
JYTHON_HOME = os.environ['JYTHON_HOME']
except KeyError:
raise Exception("Please add an JYTHON_HOME environment variable corresponding\
to the location of the jython binary directory.")
#os.path.dirname(os.path.realpath(__file__))
#NC_HOME = os.path.dirname(NEUROUNIT_HOME)
try:
with open(os.path.join(NC_HOME,'nC.sh')): pass
except IOError:
raise Exception("The NC_HOME environment variable does not correspond to\
the location of a neuroConstruct installation (no nC.sh).")
SIMULATORS = ["NEURON"]
OSB_MODELS = os.path.join(NC_HOME,"osb")
NEUROML2_MODELS = os.path.join(HOME,"NeuroML2")
SIM_CONFIGS = ["Default Simulation Configuration"]
CELL_TYPE = "hippocampus/CA1_pyramidal_neuron"
MODEL_NAME = "CA1PyramidalCell"
POPULATION_NAME = "CG_CML_0"
EXECNET_SOCKET_PORT = 8889
AUTOMATIC_SOCKET = True
MAX_TRIES = 100
PRINT_DEBUG = False
def print_debug(msg):
    if PRINT_DEBUG:
        print(msg)
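# Illustrative usage (import path assumed from this repo's layout):
#   from pythonnC.utils import constants
#   constants.print_debug("Using model " + constants.MODEL_NAME)  # silent unless PRINT_DEBUG is True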
|
rgerkin/neuroConstruct
|
pythonnC/utils/constants.py
|
Python
|
gpl-2.0
| 1,335
|
[
"NEURON"
] |
14c2b8a416f1fc4c516316b50f9b9f9a45fe43078f71e942950ec5e12374cc95
|
#!/usr/bin/env python
# -*- coding: ascii -*-
"""signal_processing.py - a class for signal processing"""
import numpy as np
import datetime
import settings
import serial_interface
from defines import *
class SignalProcessor:
"""This class provides the essential signal processing algorithms"""
def __init__(self):
# Define variables for function filterWaveform()
self.value_last_running_max = -np.inf
self.counter_running_max = 0
self.time_diff = None
self.max_val_list = np.array([])
# Define variables for function estimate_trigger()
self.delta_times = np.zeros(30)
# Get time for trigger algorithm
self.curr_time = datetime.datetime.now()
# Create serial interface thread:
if settings.determine_if_under_testing():
self.serial_interface = serial_interface.SerialInterface('')
else:
self.serial_interface = serial_interface.SerialInterface('/dev/ttyUSB0')
# Start serial interface thread
self.serial_interface.start()
def clear(self):
self.serial_interface.clear()
def filter_waveform(self, input_raw_signal, input_output_signal, input_param_1, input_param_2, input_param_3):
"""This function filters the video signal and thereby obtains a waveform more similar to pulse oximetry.
This is a real-time implementation of the algorithm described in:
Spicher N, Maderwald S, Ladd ME and Kukuk M. High-speed, contact-free measurement of the photoplethysmography
waveform for MRI triggering Proceedings of the 24th Annual Meeting of the ISMRM, Singapore, Singapore,
07.05.-13.05.2016.
        input_param_1: Number of preceding values used for filtering (standard value: 9)
        input_param_2: Number of times the running maximum signal has to be stable (standard value: 3)
        input_param_3: Minimum time (in sec) until a new trigger can be sent (standard value: 0.5)
Please note that the curve fit is computed at the moment without Gaussian weights.
"""
# Get signals
raw_signal = input_raw_signal
output_signal = input_output_signal
# Normalize values
values_norm = self.normalize(raw_signal)
# Perform pseudo-derivation
values_norm_diff = np.abs(np.diff(values_norm))
# Apply window
values_norm_diff_window = values_norm_diff[-input_param_1:]
# Prepare fit
values_x_data = np.linspace(0, 1, input_param_1)
# Apply curve fit
value_m = self.__curve_fit(values_x_data, values_norm_diff_window)
# Get output: Computed signal
output_signal = np.append(output_signal, value_m[0])
# Apply running max window
value_running_max = np.amax(output_signal[-input_param_1:])
# Increase counter if running max is equal to last value. Otherwise reset counter.
if value_running_max == self.value_last_running_max:
self.counter_running_max += 1
else:
self.counter_running_max = 0
self.value_last_running_max = value_running_max
# Compute time since last trigger was sent
self.time_diff = (datetime.datetime.now() - self.curr_time).total_seconds()
# If the running maximum was stable long enough and enough time has passed, return True
if self.counter_running_max == input_param_2 and self.time_diff > input_param_3:
# Reset counter
self.counter_running_max = 0
# Reset time
self.curr_time = datetime.datetime.now()
# Send trigger
self.serial_interface.send_trigger(0)
return True, output_signal
else:
return False, output_signal
def compute_heart_rate(self, input_raw_signal, estimated_fps):
"""This simple algorithm computes the heart rate as described in:
Spicher N, Maderwald S, Ladd ME and Kukuk M. Heart rate monitoring in ultra-high-field MRI using frequency
information obtained from video signals of the human skin compared to electrocardiography and pulse oximetry.
Proceedings of the 49th Annual Conference of the German Society for Biomedical Engineering, Luebeck, Germany,
16.-18.09.2015.
Please note that the different length of the input signal N and that a moving average filter as described in
section 2.4) of the reference is not applied.
"""
# Get normalized signal
signal = self.normalize(input_raw_signal)
# Store number of elements in signal
n = np.size(signal)
# Store FPS of video stream
fps = estimated_fps
        # Parameters: minimal and maximal HR in Hz (0.5..3 Hz, i.e. 30..180 bpm)
hr_min = 0.5
hr_max = 3
# Get current settings
curr_settings, curr_parameters = settings.get_parameters()
# Apply zero padding if it is enabled
if curr_parameters[IDX_ZERO_PADDING]:
                # Zero-pad up to a power of two; nextpow2 is applied twice, so the
                # padded length goes beyond the next power of 2 (finer FFT bins)
                next_n = self.nextpow2(self.nextpow2(n))
# Zero padding: Fill before and after signal with zeros
number_before, number_after = self.compute_zero_padding_values(next_n - n)
signal = np.concatenate((np.zeros(int(number_before)), signal, np.zeros(int(number_after))), 0)
# Use new N value instead
n = next_n
# Use Hamming window on signal
values_win = signal[0:n] * np.hamming(n)
# Compute FFT
signal_fft = np.fft.fft(values_win)
# Compute frequency axis
x = np.linspace(0, n / fps, n + 1)
freq_axis = np.fft.fftfreq(len(values_win), x[1] - x[0])
# Get boolean values if values are between hrMin and hrMax
limits_bool = (hr_min < freq_axis) & (hr_max > freq_axis)
limits_idx = np.linspace(0, n - 1, n)
# Get indices of frequencies between hrMin and hrMax
limits = limits_idx[limits_bool.nonzero()]
limits = limits.astype(int)
# Get index of maximum frequency in FFT spectrum
max_val = limits[np.argmax(abs(signal_fft[limits]))]
# Return HR, spectrum with frequency axis, and found maximum
return (np.round(freq_axis[max_val] * 60)), abs(signal_fft[limits]), freq_axis[limits], max_val - limits[0]
def estimate_trigger(self, input_raw_signal, estimated_fps, input_param_1):
"""This simple algorithm computes MRI triggers as described in:
Spicher N, Kukuk M, Ladd ME and Maderwald S. In vivo 7T MR imaging triggered by phase information obtained from
video signals of the human skin. Proceedings of the 23nd Annual Meeting of the ISMRM, Toronto, Canada,
30.05.-05.06.2015.
        input_param_1: Number of preceding values used for filtering
"""
# Get normalized signal
signal = self.normalize(input_raw_signal)
# Store number of elements in signal
n = np.size(signal)
# Store FPS of video stream
fps = estimated_fps
        # Parameters: minimal and maximal HR in Hz (0.5..3 Hz, i.e. 30..180 bpm)
hr_min = 0.5
hr_max = 3
# Use Hamming window on signal
values_win = signal[0:n] * np.hamming(n)
# Compute FFT
signal_fft = np.fft.fft(values_win)
# Get phase
signal_phase = np.angle(signal_fft)
# Compute frequency axis
x = np.linspace(0, n / fps, n + 1)
freq_axis = np.fft.fftfreq(len(values_win), x[1] - x[0])
# Get boolean values if values are between hrMin and hrMax
limits_bool = (hr_min < freq_axis) & (hr_max > freq_axis)
limits_idx = np.linspace(0, n - 1, n)
# Get indices of frequencies between hrMin and hrMax
limits = limits_idx[limits_bool.nonzero()]
limits = limits.astype(int)
# Get index of maximum frequency in FFT spectrum
max_val = limits[np.argmax(abs(signal_fft[limits]))]
# Average
if np.count_nonzero(input_raw_signal) >= 400:
if np.size(self.max_val_list) < input_param_1:
self.max_val_list = np.append(self.max_val_list, max_val)
else:
self.max_val_list = np.delete(self.max_val_list, 0)
self.max_val_list = np.append(self.max_val_list, max_val)
                max_val = int(np.round(np.mean(self.max_val_list)))  # int, so it stays a valid array index
# Compute time until next maximum in signal
if signal_phase[max_val] < 0:
self.delta = np.abs(signal_phase[max_val] / (2 * np.pi * freq_axis[max_val]))
else:
self.delta = (1 / freq_axis[max_val]) - np.abs(signal_phase[max_val] / (2 * np.pi * freq_axis[max_val]))
# If there are enough values
if np.count_nonzero(input_raw_signal) >= 400:
ret_1, ret_2 = self.serial_interface.send_trigger(self.delta)
if ret_1:
# Drop first value of array and add at end
self.delta_times = np.delete(self.delta_times, 0)
self.delta_times = np.append(self.delta_times, ret_2)
# Return HR and waiting time until next trigger
return (np.round(freq_axis[max_val] * 60)), abs(signal_fft[limits]), \
freq_axis[limits], max_val - limits[0], self.delta_times
def normalize(self, input_signal):
"""Normalize the signal to lie between 0 and 1"""
output_signal = input_signal
# Prohibit dividing by zero
if np.max(np.abs(output_signal)) > 0:
max_val = np.max(np.abs(output_signal))
min_val = np.min(np.abs(output_signal))
# MinMax normalization
output_signal = (output_signal - min_val) / (max_val - min_val)
return output_signal
def __curve_fit(self, input_signal_1, input_signal_2):
"""perform curve fitting and return slope value"""
# Todo: Add gaussian weights
m = np.polyfit(input_signal_1, input_signal_2, 1)
return m
def nextpow2(self, number):
"""Simple implementation of MATLAB nextpow2() """
curr_value = 2
while curr_value <= number:
curr_value *= 2
return curr_value
def compute_zero_padding_values(self, number):
"""During zero padding, we want to fill zeros before and after signal.
This function computes the number of zeros"""
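        # e.g. number=5 -> (2.0, 3.0): two zeros prepended, three appended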
number_of_zeros_before_signal = np.floor(number / 2)
if np.fmod(number, 2) == 1:
number_of_zeros_after_signal = number_of_zeros_before_signal + 1
else:
number_of_zeros_after_signal = number_of_zeros_before_signal
return number_of_zeros_before_signal, number_of_zeros_after_signal
|
nspi/vbcg
|
src/signal_processing.py
|
Python
|
gpl-3.0
| 10,773
|
[
"Gaussian"
] |
718cffdc2f5b78fff938882f5ea3bb1ee7d577780f6d6bb81d556544d5608484
|
import sys
from netCDF4 import Dataset
def main():
copy_gridded_ioapi_to_empty(sys.argv[1], sys.argv[2])
def copy_gridded_ioapi_to_empty(input_path, output_path):
'''Copy a GRIDDED IOAPI NetCDF file (NETCDF3_CLASSIC)
to an empty NetCDF file that has the same structure,
but the variables are empty of data.'''
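    # Illustrative invocation (hypothetical file names):
    #   python make_gridded_ioapi_template.py gridded_input.nc empty_template.nc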
# open input NetCDF file
fin = Dataset(input_path, 'r', format='NETCDF3_CLASSIC')
# read variables
fin_var = {}
for var in fin.variables:
fin_var[var] = [var, fin.variables[var].units, fin.variables[var].var_desc]
# read attributes
fin_attr = {}
for attr in fin.ncattrs():
fin_attr[attr] = getattr(fin, attr)
# open output NetCDF file
fout = Dataset(output_path, 'w', format='NETCDF3_CLASSIC')
# create the 6 GRIDDED IOAPI dimensions
TSTEP = fout.createDimension('TSTEP', None)
DATE_TIME = fout.createDimension('DATE-TIME', 2)
LAY = fout.createDimension('LAY', fin.NLAYS)
VAR = fout.createDimension('VAR', fin.NVARS)
ROW = fout.createDimension('ROW', fin.NROWS)
COL = fout.createDimension('COL', fin.NCOLS)
# close input file
fin.close()
# variable and attribute definitions
TFLAG = fout.createVariable('TFLAG', 'i4', ('TSTEP', 'VAR', 'DATE-TIME'))
TFLAG.units = '<YYYYDDD,HHMMSS>'
TFLAG.long_name = 'TFLAG'
TFLAG.var_desc = 'Timestep-valid flags: (1) YYYYDDD or (2) HHMMSS'
# remaining variables and attribute definitions
for key in fin_var:
species = key
if species == 'TFLAG':
continue
fout.createVariable(species, 'f4', ('TSTEP', 'LAY', 'ROW', 'COL'))
fout.variables[species].long_name = species
fout.variables[species].units = fin_var[species][1]
fout.variables[species].var_desc = fin_var[species][2]
# global attributes
for name in fin_attr:
setattr(fout, name, fin_attr[name])
if __name__ == '__main__':
main()
|
theJollySin/python_for_scientists
|
classes/14_netcdf/make_gridded_ioapi_template.py
|
Python
|
gpl-3.0
| 1,964
|
[
"NetCDF"
] |
19b7e7a856bdc1b98f1d164cb585e0cf2f0e146032b5c946e56fbc67544ba373
|
#!/usr/local/bin/python3.4.2
# ----Copyright (c) 2017 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/quality-control/blob/master/LICENSE----
# run script with 3 or more arguments:
# argument 0 is the script name
# argument 1 is the path to the Legacy IDs and Upload Dates
# argument 2 is the path to the Asset IDs and Legacy IDs
# argument 3 is the path you want to output the matched CSV to
# argument 4 is a term to add to the output filename for disambiguation
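# Illustrative invocation (hypothetical file names):
#   python matchvaluesfromlists_2.py legacy_upload_dates.csv asset_legacy_ids.csv /path/to/output batch01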
import csv
import os
from os.path import isfile, join, split
import sys
import io
filePath_1 = str(sys.argv[1])
filePath_2 = str(sys.argv[2])
filePath_3 = str(sys.argv[3])
batch = str(sys.argv[4])
dateDict = {}
legacyIdDict = {}
simplifiedDict = {}
with open(filePath_1, 'rU') as f, open(filePath_2, 'rU') as g:
dateData = csv.reader(f, dialect='excel', delimiter=',')
next(dateData, None) # skip the headers
idData = csv.reader(g,dialect='excel', delimiter=',')
for row in idData:
dams_id = row[0]
legacy_id = row[1]
legacyIdDict[legacy_id] = dams_id
for row in dateData:
legacy_id_2 = row[0]
uploadDate = row[1]
if legacy_id_2 not in legacyIdDict:
print('Not in DAMS: ',legacy_id_2)
else:
            dams_id = legacyIdDict[legacy_id_2]
            simplifiedDict[dams_id] = uploadDate
outputPath = ''.join([str(filePath_3), '/UploadDates', batch, '.csv'])
with open(outputPath, 'w', newline='') as csvfile:
w = csv.writer(csvfile, dialect='excel', delimiter=',')
w.writerow(["DAMS ID", "Digital Accession Date"])
for k,v in simplifiedDict.items():
w.writerow([k,v])
#need date value to be formatted as YYYY-MM-DD
|
CarnegieHall/quality-control
|
matchvaluesfromlists_2.py
|
Python
|
mit
| 1,680
|
[
"VisIt"
] |
a3cc8fa13b52eccf394b90b222849b8b50446ed23240594556263f412d007314
|
#!/usr/bin/env python
"""
Converts an atom selection string from psf/resid to corresponding
AMBER residues
Author: Robin Betz
Copyright (C) 2015 Robin Betz
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330
Boston, MA 02111-1307, USA.
"""
from __future__ import print_function
from vmd import atomsel, molecule
import itertools
import readline
from glob import glob
_acids = ('ACE ALA ARG ASN ASP CYS CYX GLN GLU GLY HIE HIS HSP HSE '
'HSD ILE LEU LYS MET NMA PHE PRO SER THR TRP TYR VAL')
# For python 2, want raw_input
if hasattr(__builtins__, "raw_input"):
input = raw_input
#==============================================================================
def parseSelection(inputstr):
"""
Parses an input number selection string. Allows
ranges separated by - and comma separated values.
Examples: 1,2-5, 67,68-70
Returns a set
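    An illustrative doctest-style check:
    >>> sorted(parseSelection('1,2-5,67'))
    [1, 2, 3, 4, 5, 67]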
"""
selection = []
tokens = [x.strip() for x in inputstr.split(',')]
for t in tokens:
if "-" in t:
r = [int(k.strip()) for k in t.split('-')]
if len(r) > 1:
r.sort()
selection.extend(range(r[0],r[-1]+1))
else:
selection.append(int(t))
return set(selection)
#==============================================================================
def groupOutput(inputset):
"""
Groups the integers in input set into ranges
in a string parseable by parseSelection
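    e.g. groupOutput([1, 2, 3, 7]) gives '1-3,7' (the input is assumed to
    iterate in ascending order, as sorted lists do)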
"""
# Find ranges using itertools
def ranges(i):
for a,b in itertools.groupby(enumerate(i),
lambda x: x[1]-x[0]):
b = list(b)
yield b[0][1], b[-1][1]
l = list(ranges(inputset))
# Put tuples together into a passable list
result = ""
for i in l:
if i[0] == i[1]: result += "%d," % i[0]
else: result += "%d-%d," % (i[0],i[1])
return result[:-1]
#==============================================================================
# Autocomplete directories at prompt
def complete(text, state):
return (glob(text+'*')+[None])[state]
readline.parse_and_bind("tab: complete")
readline.set_completer_delims(' \t\n;')
readline.set_completer(complete)
#==============================================================================
print("\n-----------------------------------------------------------------------")
print("What is your psf file?")
psf = input("> ")
molid = molecule.load('psf', psf)
print("\n-----------------------------------------------------------------------")
print("Enter a valid VMD atom selection string here")
#print("Enter your selection set of resids here.")
#print("Comma separated entries and ranges with - are allowed")
#print("Example: '1,2-5,6-10'")
print("NOTE: This will pull out protein residues ONLY")
inputstr = input("> ")
residues = set()
resids = parseSelection(inputstr)
for r in resids:
rs = set(atomsel("resname %s and resid %d" % (_acids, r)).get('residue'))
if len(rs) != 1:
print("\n\nNone or duplicate residue matching resid %d" % r)
chains = set(atomsel("resid %d" % r).get("chain"))
if len(chains) > 1:
print("I found multiple chains for this resid: %s" % chains)
else:
print("Something is messed up with the residue definition")
quit(1)
residues.add(int(rs.pop())+1)
result = groupOutput(residues)
print("----------------------------------------------------------------------")
print("Here is your residue selection string for use with the matching prmtop:\n")
print(result)
print()
|
Eigenstate/dabble
|
get_restraint_mask.py
|
Python
|
gpl-2.0
| 4,191
|
[
"Amber",
"VMD"
] |
bc8fdd9156844d2309647be978e5913219d6ee70ce1655f1090ca1763683ba2f
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2012 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Base classes for editors """
import logging
import gtk
from gtk import gdk
from kiwi.enums import ListType
from kiwi.ui.delegates import GladeSlaveDelegate
from kiwi.ui.listdialog import ListContainer
from kiwi.ui.widgets.label import ProxyLabel
from stoqlib.database.runtime import StoqlibStore
from stoqlib.gui.base.dialogs import RunnableView, BasicDialog, run_dialog
from stoqlib.gui.events import (EditorSlaveCreateEvent, EditorSlaveConfirmEvent,
EditorCreateEvent)
from stoqlib.lib.decorators import public
from stoqlib.lib.message import yesno
from stoqlib.lib.translation import stoqlib_gettext
log = logging.getLogger(__name__)
_ = stoqlib_gettext
def _get_slaves_recursively(editor_slave):
slaves = []
for slave in editor_slave.slaves.values():
if isinstance(slave, BaseEditorSlave):
slaves.append(slave)
slaves.extend(_get_slaves_recursively(slave))
return slaves
class BaseEditorSlave(GladeSlaveDelegate):
""" Base class for editor slaves inheritance. It offers methods for
setting up focus sequence, required attributes and validated attrs.
:cvar gladefile:
:cvar model_type:
"""
domain = 'stoq'
gladefile = None
model_type = None
fields = None
proxy_widgets = ()
def __init__(self, store, model=None, visual_mode=False, edit_mode=None):
""" A base class for editor slaves inheritance
:param store: a store
:param model: the object model tied with the proxy widgets
        :param visual_mode: should this slave be opened in visual mode?
                            If so, all the proxy widgets will be disabled
        :param edit_mode: indicates whether the slave is being edited or is
                          a new one. The editor that creates the slave should
                          pass its own edit_mode property to the slaves.
                          If None, self.edit_mode will be set according to
                          the model
"""
self.store = store
# FIXME: Need to check if we need to add edit_mode parameter for all classes
# that inherit from BaseEditor
if edit_mode is not None:
self.edit_mode = edit_mode
else:
self.edit_mode = model is not None
self.visual_mode = visual_mode
if model:
created = ""
else:
created = "created "
model = self.create_model(self.store)
if model is None:
fmt = "%s.create_model() must return a valid model, not %r"
raise ValueError(fmt % (self.__class__.__name__, model))
log.info("%s editor using a %smodel %s" % (
self.__class__.__name__, created, type(model).__name__))
if self.model_type:
if not isinstance(model, self.model_type):
fmt = '%s editor requires a model of type %s, got a %r'
raise TypeError(
fmt % (self.__class__.__name__,
self.model_type.__name__,
model))
else:
fmt = "Editor %s must define a model_type attribute"
raise ValueError(fmt % (self.__class__.__name__, ))
self.model = model
GladeSlaveDelegate.__init__(self, gladefile=self.gladefile)
if self.visual_mode:
self._setup_visual_mode()
self.setup_proxies()
self.setup_slaves()
EditorSlaveCreateEvent.emit(self, model, store, visual_mode)
#
# Private
#
def _setup_visual_mode(self):
widgets = self.__class__.proxy_widgets
for widget_name in widgets:
widget = getattr(self, widget_name)
if isinstance(widget, ProxyLabel):
pass
elif isinstance(widget, gtk.Entry):
# First, we need to be able to select text from entries
widget.set_editable(False)
# Second, make sure they don't look like they're editable,
            # copy over the insensitive style
style = widget.get_style()
widget.modify_text(
gtk.STATE_NORMAL, style.text[gtk.STATE_INSENSITIVE])
widget.modify_base(
gtk.STATE_NORMAL, style.base[gtk.STATE_INSENSITIVE])
else:
widget.set_sensitive(False)
if self.fields:
for field in self.fields.values():
field.set_sensitive(False)
field.can_edit = False
self.update_visual_mode()
#
# Public API
#
def confirm(self):
"""Confirms the editor
Before actually confirming, we will verify that
:meth:`.validate_confirm` (and all slaves validate_confirm)
returns ``True``.
Only if none of those calls return ``False``, we will call
:meth:`.on_confirm` on the editor and all the slaves.
        :class:`BaseEditor` will call this when someone clicks its
*confirm* button.
"""
slaves = _get_slaves_recursively(self)
slaves.append(self)
# Do this before trying to confirm any of the slaves for safety
if not all([slave.is_valid and slave.validate_confirm() for
slave in slaves]):
return False
for slave in slaves:
slave.on_confirm()
EditorSlaveConfirmEvent.emit(self, self.model, self.store, self.visual_mode)
return True
def cancel(self):
"""Cancels the editor
By calling this, the editor will get cancelled. All of the
editor's slaves :meth:`.on_cancel`, including the editor's
itself will get called.
        :class:`BaseEditor` will call this when someone clicks its
*cancel* button.
"""
slaves = _get_slaves_recursively(self)
slaves.append(self)
for slave in slaves:
slave.on_cancel()
def create_model(self, store):
"""Creates a new model for the editor.
After this method is called, the model can be accessed as self.model.
The default behavior is to raise a TypeError, which can
be overridden in a subclass.
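        An override usually just instantiates and returns the model, eg
        (illustrative only; ``Client`` stands in for any domain class):
            return Client(store=store)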
:param store: a store
"""
raise TypeError(
"%r needs a model, got None. Perhaps you want to "
"implement create_model?" % (self.__class__.__name__))
def setup_proxies(self):
"""A subclass can override this to setup proxies
This is called at the end of the object initialization and before
:meth:`.setup_slaves`. If you need to add a proxy, it should
be done here, eg:
self.proxy = self.add_proxy(self.model, self.proxy_widgets)
"""
def setup_slaves(self):
"""A subclass can override this to setup slaves
This is called at the end of the object initialization and after
:meth:`.setup_proxies`. If you need to add slaves, it should
be done here, eg:
self.attach_slave('slave_holder', SlaveClass())
"""
def on_cancel(self):
"""Called when the dialog is about to get cancelled
When someone calls :meth:`.cancel` (normally a :class:`BaseEditor`:
        when clicking on its *cancel* button), this will be called. If you
        need to do some kind of cleanup when cancelling, like removing an
        object that was already committed, this is the right place to do it.
Some places (like :func:`stoqlib.gui.base.dialogs.run_dialog`) will
consider the editor's retval as :obj:`.retval`. Depending on the
case, if you want to confirm the editor even when it was cancelled
(e.g. Asking the user "Do you want to save those changes?")
you can do:
self.retval = True
Or simply do what you want in another store and commit it.
"""
def on_confirm(self):
"""Called when the dialog is about to get confirmed
When someone calls :meth:`.confirm` (normally a :class:`BaseEditor`:
        when clicking on its *confirm* button), this will be called. If you
        need to do some kind of late modification before the object gets
        committed, this is the place to do it.
Some places (like :func:`stoqlib.gui.base.dialogs.run_dialog`) will
consider the editor's retval as :obj:`.retval`. Depending on the
case, if you want to cancel the editor even when it was confirmed
you can do:
self.retval = False
Or simply do a rollback on the actual store.
"""
def update_visual_mode(self):
"""Called when the editor enters on visual mode
When the editor is readonly (aka visual mode), every proxy widgets
and fields will be set insensitive. You should override this if
you have any kind of extra work to do when entering visual mode.
"""
def validate_confirm(self):
"""Called to see if the editor can be confirmed
The editor won't get confirmed if this returns ``False``. It's
recommended to show a message for the user, as a feedback, to
explain why this didn't confirm. By default, this returns ``True``.
"""
return True
@public(since="1.5.0")
class BaseEditor(BaseEditorSlave, RunnableView):
""" Base class for editor dialogs. It offers methods of
BaseEditorSlave, a windows title and OK/Cancel buttons.
"""
#: the model type name of the model we are editing.
#: This value will be showed in the title of the editor and can not
#: be merely the attribute __name__ of the object for usability reasons.
#: Call sites will decide what could be the best name applicable in each
#: situation.
model_name = None
header = ''
size = ()
title = None
hide_footer = False
#: if we need to ask the user if he really wants to cancel the dialog if
#: there are any changes done that would be lost otherwise
need_cancel_confirmation = False
#: a list of widget names that when activated will confirm the dialog
confirm_widgets = ()
help_section = None
form_holder_name = 'toplevel'
def __init__(self, store, model=None, visual_mode=False):
self._confirm_disabled = False
# FIXME:
        # BasicEditor should inherit from BasicDialog and instantiate
# the slave inside here, but it requires some major surgery
BaseEditorSlave.__init__(self, store, model,
visual_mode=visual_mode)
self.main_dialog = BasicDialog(title=self.get_title(self.model),
header_text=self.header,
help_section=self.help_section,
size=self.size)
        # Do not close the dialog if we return False from self.confirm
self.main_dialog.enable_confirm_validation = True
self.main_dialog.attach_slave("main", self)
self.main_dialog.connect('confirm', self._on_main_dialog__confirm)
self.main_dialog.connect('cancel', self._on_main_dialog__cancel)
dialog_toplevel = self.main_dialog.get_toplevel()
dialog_toplevel.connect('response', self._on_toplevel__response)
dialog_toplevel.connect('delete-event', self._on_toplevel__delete_event)
        # This helps kiwi's ui tests: set our name to the classname of
        # the slave, which is much more helpful than just "BasicDialog"
self.main_dialog.get_toplevel().set_name(self.__class__.__name__)
if self.hide_footer or self.visual_mode:
self.main_dialog.hide_footer()
for name in self.confirm_widgets:
self.set_confirm_widget(getattr(self, name))
self.register_validate_function(self._validation_function)
self.force_validation()
# We need to use self.model instead of model, since BaseEditorSlave
# will create one if its None
EditorCreateEvent.emit(self, self.model, store, visual_mode)
if store is not None:
# This needs to be the last thing done on __init__ since we don't want
# to consider things like self.create_model as a change
self._store_pending_count = store.get_pending_count()
#
# Private
#
def _get_title_format(self):
if self.visual_mode:
return _(u"Details of %s")
if self.edit_mode:
return _(u'Edit Details of "%s"')
return _(u"Add %s")
def _need_cancel_confirmation(self):
return self.need_cancel_confirmation and self.has_changes()
#
# Public
#
def has_changes(self):
"""Check if there are changes on this editor
By default we will check if there're any pending changes on
:obj:`.store` and that information will be used by
:attr:`.need_cancel_confirmation`
"""
if self.store is None:
return False
return self.store.get_pending_count() > self._store_pending_count
def get_title(self, model):
if self.title:
return self.title
if not model:
raise ValueError("A model should be defined at this point")
title_format = self._get_title_format()
if self.model_name:
model_name = self.model_name
else:
# Fallback to the name of the class
model_name = type(self.model).__name__
return title_format % model_name
def enable_window_controls(self):
"""Enables the window controls
See :class:`kiwi.ui.views.BaseView.enable_window_controls`.
"""
self.main_dialog.enable_window_controls()
def set_description(self, description):
"""Sets the description of the model object which is used by the editor
:param description:
"""
        title_format = self._get_title_format()
        self.main_dialog.set_title(title_format % description)
def refresh_ok(self, validation_value):
""" Refreshes ok button sensitivity according to widget validators
status """
if self._confirm_disabled:
return
self.main_dialog.ok_button.set_sensitive(validation_value)
def add_button(self, label=None, stock=None):
"""
Adds a button to editor. The added button is returned which you
can use to connect signals on.
:param label: label of the button
:param stock: stock label of the button
        :returns: the button added
:rtype: gtk.Button
"""
if label is None and stock is None:
raise TypeError("You need to provide a label or a stock argument")
button = gtk.Button(label=label, stock=stock)
button.props.can_focus = True
self.main_dialog.action_area.pack_start(button, False, False)
self.main_dialog.action_area.reorder_child(button, 0)
button.show()
return button
def cancel(self):
"""
Cancel the dialog.
"""
if (self._need_cancel_confirmation() and
not yesno(_("If you cancel this dialog all changes will be lost. "
"Are you sure?"), gtk.RESPONSE_NO,
_("Cancel"), _("Don't cancel"))):
return False
        # set this before running BaseEditorSlave.cancel so
# on_cancel can modify self.retval, if needed
self.retval = False
BaseEditorSlave.cancel(self)
self.main_dialog.close()
if isinstance(self.store, StoqlibStore):
self.store.retval = self.retval
log.info("%s: Closed (cancelled), retval=%r" % (
self.__class__.__name__, self.retval))
return True
def confirm(self):
"""
Confirm the dialog.
"""
        # set this before running BaseEditorSlave.confirm so
# on_confirm can modify self.retval, if needed
self.retval = self.model
if self._confirm_disabled:
return False
if not BaseEditorSlave.confirm(self):
return False
self.main_dialog.close()
if isinstance(self.store, StoqlibStore):
self.store.retval = self.retval
log.info("%s: Closed (confirmed), retval=%r" % (
self.__class__.__name__, self.retval))
return True
def enable_ok(self):
"""
Enable the ok button of the dialog, eg makes it possible
to close/confirm the dialog.
"""
self.main_dialog.enable_ok()
self._confirm_disabled = False
def disable_ok(self):
"""
        Disable the ok button of the dialog, eg makes it impossible
        to close/confirm the dialog.
"""
self.main_dialog.disable_ok()
self._confirm_disabled = True
def enable_normal_window(self):
"""
Enable the dialog as a normal window.
This tells the window manager that the window
should behave as a normal window instead of a dialog.
"""
toplevel = self.main_dialog.get_toplevel()
toplevel.set_type_hint(gdk.WINDOW_TYPE_HINT_NORMAL)
def set_confirm_widget(self, widget_name):
"""
Make a widget confirmable, eg activating that widget would
close the dialog.
:param widget_name: name of the widget to be confirmable
"""
self.main_dialog.set_confirm_widget(widget_name)
def set_message(self, message, message_type=None):
"""Sets a message for this editor
:param message: message to add
:param message_type: type of message to add
"""
if message_type is None:
message_type = gtk.MESSAGE_INFO
self.main_dialog.set_message(message, message_type)
# RunnableView
# This delegate everything to self.main_dialog
def close(self):
self.main_dialog.close()
def run(self):
self.main_dialog.run()
def get_current_toplevel(self):
return self.main_dialog.get_current_toplevel()
def destroy(self):
self.main_dialog.destroy()
def set_transient_for(self, window):
self.main_dialog.set_transient_for(window)
# Callbacks
def _on_main_dialog__cancel(self, dialog, retval):
return self.cancel()
def _on_main_dialog__confirm(self, dialog, retval):
return self.confirm()
def _validation_function(self, is_valid):
self.refresh_ok(is_valid)
def _on_toplevel__delete_event(self, widget, *args, **kwargs):
        # Avoid the dialog being closed when hitting 'Esc' when we would
        # need to confirm the cancellation.
if self._need_cancel_confirmation():
return True
def _on_toplevel__response(self, dialog, response, *args, **kwargs):
# FIXME: For the delete-event to really stops from destroying the
# dialog, we also need to stop the response event emission. See
# http://faq.pygtk.org/index.py?req=show&file=faq10.013.htp
# for more details
if (self._need_cancel_confirmation() and
response == gtk.RESPONSE_DELETE_EVENT):
dialog.emit_stop_by_name('response')
class BaseRelationshipEditorSlave(GladeSlaveDelegate):
"""An editor for relationships between objects
BaseRelationshipEditor provides an easy way to edit (add/remove) relations
between objects.
It doesn't allow creations of new objects, only linking between them.
(the linking might require new objects, though)
    For example, you could edit suppliers for a product (or products supplied
    by a supplier).
Subclasses must implement get_targets, get_columns, get_relations, and
create_model.
"""
domain = 'stoq'
gladefile = 'RelationshipEditor'
target_name = None
model_type = None
editor = None
def __init__(self, store, parent=None, visual_mode=False):
self._parent = parent
self.store = store
self.visual_mode = visual_mode
GladeSlaveDelegate.__init__(self, gladefile=self.gladefile)
self._setup_widgets()
def _setup_relations_list(self):
self.relations_list = ListContainer(self.get_columns(), gtk.ORIENTATION_HORIZONTAL)
self.relations_list._vbox.padding = 0
self.model_vbox.pack_start(self.relations_list)
self.relations_list.set_list_type(ListType.UNADDABLE)
self.relations_list.connect('remove-item',
self._on_remove_item__clicked)
self.relations_list.connect('edit-item', self._on_edit_item__clicked)
self.relations_list.show()
def _setup_widgets(self):
self.model_name_label.set_label(self.target_name + ':')
targets = self.get_targets()
self.target_combo.prefill(targets)
self._setup_relations_list()
size_group = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)
size_group.add_widget(self.add_button)
size_group.add_widget(self.relations_list.edit_button)
size_group.add_widget(self.relations_list.remove_button)
self.add_button.set_sensitive(False)
if not self.editor:
self.relations_list.edit_button.set_sensitive(False)
self.relations_list.add_items(self.get_relations())
if self.visual_mode:
self.target_combo.set_sensitive(False)
self.add_button.set_sensitive(False)
self.relations_list.set_list_type(ListType.READONLY)
def get_targets(self):
"""Returns a list of valid taret objects.
for instance, if suppliers for a product are being edited, then this
should return a list fo suppliers.
"""
raise NotImplementedError
def get_columns(self):
"""Columns to display"""
raise NotImplementedError
def get_relations(self):
"""Returns the already existing relations.
        This may be entries from a mapping table or entries from the target
table itself, depending on the type of relationship
"""
raise NotImplementedError
def create_model(self):
"""This method should create the model when adding a new relationship.
        If the addition is canceled, it will automatically be removed.
"""
raise NotImplementedError
def add(self):
model = self.create_model()
if not model:
return False
if not self.editor:
return model
res = run_dialog(self.editor, self._parent, self.store, model)
if not res:
self.model_type.delete(id=model.id, store=self.store)
return res
def edit(self, model):
return run_dialog(self.editor, self._parent, self.store, model)
def remove(self, model):
self.model_type.delete(model.id, store=self.store)
return True
def _run_editor(self, model=None):
"""Runs an editor for the relationship (if necessary).
        An editor may be necessary only if there is a mapping table and
        extra information in this table.
"""
if model is None:
res = self.add()
else:
res = self.edit(model)
return res
def on_add_button__clicked(self, widget):
result = self._run_editor()
if result:
self.relations_list.add_item(result)
def on_target_combo__content_changed(self, widget):
has_selected = self.target_combo.read() is not None
self.add_button.set_sensitive(has_selected and not self.visual_mode)
def _on_edit_item__clicked(self, list, item):
if not self.editor:
return
result = self._run_editor(item)
if result:
self.relations_list.update_item(result)
def _on_remove_item__clicked(self, list, item):
if self.remove(item):
self.relations_list.remove_item(item)
|
tiagocardosos/stoq
|
stoqlib/gui/editors/baseeditor.py
|
Python
|
gpl-2.0
| 24,863
|
[
"VisIt"
] |
d235ce649b93ced0af96ccdd9682ef227893f30a00df03ae23d25e04334c6492
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
NOT_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base name with ambiguous state and that should not be checked for
shebang, add it to IGNORED_FILENAMES.
Any file not matching the above will be opened and looked if it has a shebang
or an ELF or Mach-O header. If this does not match the executable bit on the
file, the file will be flagged. Mach-O files are allowed to exist with or
without an executable bit set, as there are many examples of it appearing as
test data, and as Mach-O types such as dSYM that canonically do not have their
executable bits set.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
from __future__ import print_function
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'exe',
)
# Files for which the executable bit may or may not be set.
IGNORED_EXTENSIONS = (
'dylib',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
'macos/app_mode_loader',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'base/third_party/libevent/autogen.sh',
'base/third_party/libevent/test/test.sh',
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
'third_party/wpt_tools/wpt/tools/third_party/',
# TODO(maruel): Fix these.
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/libxml/linux/xml2-config',
'third_party/protobuf/',
'third_party/sqlite/',
'third_party/tcmalloc/',
)
#### USER EDITABLE SECTION ENDS HERE ####
assert (set(EXECUTABLE_EXTENSIONS) & set(IGNORED_EXTENSIONS) &
set(NON_EXECUTABLE_EXTENSIONS) == set())
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
assert all([set(path).issubset(VALID_CHARS) for path in paths])
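# For instance (hypothetical path), 'chrome/Test Data/foo' would trip this
# assertion: the upper-case letters and the space are outside VALID_CHARS,
# which admits only lower-case letters, digits, '/', '-', '_' and '.'.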
git_name = 'git.bat' if sys.platform.startswith('win') else 'git'
def capture(cmd, cwd):
"""Returns the output of a command.
Ignores the error code or stderr.
"""
logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
env = os.environ.copy()
env['LANGUAGE'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
return p.communicate()[0].decode('utf-8', 'ignore')
def get_git_root(dir_path):
"""Returns the git checkout root or None."""
root = capture([git_name, 'rev-parse', '--show-toplevel'], dir_path).strip()
if root:
return root
def is_ignored(rel_path):
"""Returns True if rel_path is in our whitelist of files to ignore."""
rel_path = rel_path.lower()
return (
os.path.basename(rel_path) in IGNORED_FILENAMES or
rel_path.lower().startswith(IGNORED_PATHS))
def must_be_executable(rel_path):
"""The file name represents a file type that must have the executable bit
set.
"""
return (os.path.splitext(rel_path)[1][1:] in EXECUTABLE_EXTENSIONS or
rel_path.lower() in EXECUTABLE_PATHS)
def ignored_extension(rel_path):
"""The file name represents a file type that may or may not have the
executable set.
"""
return os.path.splitext(rel_path)[1][1:] in IGNORED_EXTENSIONS
def must_not_be_executable(rel_path):
"""The file name represents a file type that must not have the executable
bit set.
"""
return (os.path.splitext(rel_path)[1][1:] in NON_EXECUTABLE_EXTENSIONS or
rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
"""Returns if any executable bit is set."""
if sys.platform.startswith('win'):
# Using stat doesn't work on Windows, we have to ask git what the
# permissions are.
dir_part, file_part = os.path.split(full_path)
bits = capture([git_name, 'ls-files', '-s', file_part], dir_part).strip()
    return bits.startswith('100755')
permission = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return bool(permission & os.stat(full_path).st_mode)
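# Worked example of the mask above: S_IXUSR | S_IXGRP | S_IXOTH == 0o111, so a
# file with mode 0o755 gives 0o755 & 0o111 == 0o111 (truthy: executable),
# while mode 0o644 gives 0o644 & 0o111 == 0 (falsy: not executable).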
def has_shebang_or_is_elf_or_mach_o(full_path):
"""Returns if the file starts with #!/ or is an ELF or Mach-O binary.
full_path is the absolute path to the file.
"""
with open(full_path, 'rb') as f:
data = f.read(4)
  return (
      data[:3] == b'#!/' or data == b'#! /',
      data == b'\x7fELF',  # ELFMAG
      data in (
          b'\xfe\xed\xfa\xce',  # MH_MAGIC
          b'\xce\xfa\xed\xfe',  # MH_CIGAM
          b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
          b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
          b'\xca\xfe\xba\xbe',  # FAT_MAGIC
          b'\xbe\xba\xfe\xca',  # FAT_CIGAM
          b'\xca\xfe\xba\xbf',  # FAT_MAGIC_64
          b'\xbf\xba\xfe\xca'))  # FAT_CIGAM_64
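# The tuple keeps the three header kinds separate so check_file() can treat
# Mach-O leniently. Sketch (hypothetical files): a script beginning with
# b'#!/bin/sh' yields (True, False, False); a file beginning with
# b'\x7fELF' yields (False, True, False); one beginning with
# b'\xfe\xed\xfa\xcf' (MH_MAGIC_64) yields (False, False, True).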
def check_file(root_path, rel_path):
"""Checks the permissions of the file whose path is root_path + rel_path and
returns an error if it is inconsistent. Returns None on success.
It is assumed that the file is not ignored by is_ignored().
If the file name is matched with must_be_executable() or
must_not_be_executable(), only its executable bit is checked.
  Otherwise, the first few bytes of the file are read to check for a shebang
  or an ELF or Mach-O header, and the result is compared with the executable
  bit on the file.
"""
full_path = os.path.join(root_path, rel_path)
def result_dict(error):
return {
'error': error,
'full_path': full_path,
'rel_path': rel_path,
}
try:
bit = has_executable_bit(full_path)
except OSError:
# It's faster to catch exception than call os.path.islink(). The Chromium
# tree may have invalid symlinks.
return None
exec_add = 'git add --chmod=+x %s' % rel_path
exec_remove = 'git add --chmod=-x %s' % rel_path
if must_be_executable(rel_path):
if not bit:
return result_dict('Must have executable bit set: %s' % exec_add)
return
if must_not_be_executable(rel_path):
if bit:
return result_dict('Must not have executable bit set: %s' % exec_remove)
return
if ignored_extension(rel_path):
return
# For the others, it depends on the file header.
(shebang, elf, mach_o) = has_shebang_or_is_elf_or_mach_o(full_path)
if bit != (shebang or elf or mach_o):
if bit:
return result_dict(
'Has executable bit but not shebang or ELF or Mach-O header: %s' %
exec_remove)
if shebang:
return result_dict('Has shebang but not executable bit: %s' % exec_add)
if elf:
return result_dict('Has ELF header but not executable bit: %s' % exec_add)
# Mach-O is allowed to exist in the tree with or without an executable bit.
def check_files(root, files):
gen = (check_file(root, f) for f in files
if not is_ignored(f) and not os.path.isdir(f))
return filter(None, gen)
class ApiBase(object):
def __init__(self, root_dir, bare_output):
self.root_dir = root_dir
self.bare_output = bare_output
self.count = 0
self.count_read_header = 0
def check_file(self, rel_path):
logging.debug('check_file(%s)' % rel_path)
self.count += 1
if (not must_be_executable(rel_path) and
not must_not_be_executable(rel_path)):
self.count_read_header += 1
return check_file(self.root_dir, rel_path)
def check_dir(self, rel_path):
return self.check(rel_path)
def check(self, start_dir):
"""Check the files in start_dir, recursively check its subdirectories."""
errors = []
items = self.list_dir(start_dir)
logging.info('check(%s) -> %d' % (start_dir, len(items)))
for item in items:
full_path = os.path.join(self.root_dir, start_dir, item)
rel_path = full_path[len(self.root_dir) + 1:]
if is_ignored(rel_path):
continue
if os.path.isdir(full_path):
# Depth first.
errors.extend(self.check_dir(rel_path))
else:
error = self.check_file(rel_path)
if error:
errors.append(error)
return errors
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
return sorted(
x for x in os.listdir(os.path.join(self.root_dir, start_dir))
if not x.startswith('.')
)
class ApiAllFilesAtOnceBase(ApiBase):
_files = None
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
if self._files is None:
self._files = sorted(self._get_all_files())
if not self.bare_output:
print('Found %s files' % len(self._files))
start_dir = start_dir[len(self.root_dir) + 1:]
return [
x[len(start_dir):] for x in self._files if x.startswith(start_dir)
]
def _get_all_files(self):
"""Lists all the files and directory inside self._root_dir."""
raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
def _get_all_files(self):
return capture([git_name, 'ls-files'], cwd=self.root_dir).splitlines()
def get_scm(dir_path, bare):
"""Returns a properly configured ApiBase instance."""
cwd = os.getcwd()
root = get_git_root(dir_path or cwd)
if root:
if not bare:
print('Found git repository at %s' % root)
return ApiGit(dir_path or root, bare)
# Returns a non-scm aware checker.
if not bare:
print('Failed to determine the SCM for %s' % dir_path)
return ApiBase(dir_path or cwd, bare)
def main():
usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--root',
help='Specifies the repository root. This defaults '
'to the checkout repository root')
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Print debug logging')
parser.add_option(
'--bare',
action='store_true',
default=False,
help='Prints the bare filename triggering the checks')
parser.add_option(
'--file', action='append', dest='files',
      help='Specifies a list of files to check the permissions of. Only these '
'files will be checked')
parser.add_option(
'--file-list',
help='Specifies a file with a list of files (one per line) to check the '
'permissions of. Only these files will be checked')
parser.add_option('--json', help='Path to JSON output file')
options, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])
if len(args) > 1:
parser.error('Too many arguments used')
if options.files and options.file_list:
parser.error('--file and --file-list are mutually exclusive options')
if sys.platform.startswith(
'win') and not options.files and not options.file_list:
# checkperms of the entire tree on Windows takes many hours so is not
# supported. Instead just check this script.
options.files = [sys.argv[0]]
options.root = '.'
print('Full-tree checkperms not supported on Windows.')
if options.root:
options.root = os.path.abspath(options.root)
if options.files:
errors = list(check_files(options.root, options.files))
elif options.file_list:
with open(options.file_list) as file_list:
files = file_list.read().splitlines()
errors = check_files(options.root, files)
else:
api = get_scm(options.root, options.bare)
start_dir = args[0] if args else api.root_dir
errors = api.check(start_dir)
if not options.bare:
      print('Processed %d files, %d files were tested for shebang/ELF/Mach-O '
            'header' % (api.count, api.count_read_header))
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
if options.bare:
print('\n'.join(e['full_path'] for e in errors))
else:
print('\nFAILED\n')
print('\n'.join('%s: %s' % (e['full_path'], e['error']) for e in errors))
return 1
if not options.bare:
print('\nSUCCESS\n')
return 0
if '__main__' == __name__:
sys.exit(main())
|
ric2b/Vivaldi-browser
|
chromium/tools/checkperms/checkperms.py
|
Python
|
bsd-3-clause
| 14,907
|
[
"xTB"
] |
fc7abe75c672f15dece7e7ae4c4729aaf297551d9fc3c7389dbad9d39d8f167c
|
#!/usr/bin/env python
"""
Converts a user certificate in p12 format into the standard .globus usercert.pem and userkey.pem files.
Creates the necessary directory, $HOME/.globus, if needed. Backs up old pem files if any are found.
"""
import os
import sys
import shutil
from datetime import datetime
from DIRAC import gLogger
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Base.Script import Script
@Script()
def main():
Script.registerArgument("P12: user certificate in the p12")
_, args = Script.parseCommandLine(ignoreErrors=True)
p12 = args[0]
if not os.path.isfile(p12):
gLogger.fatal("%s does not exist." % p12)
sys.exit(1)
globus = os.path.join(os.environ["HOME"], ".globus")
if not os.path.isdir(globus):
gLogger.notice(f"Creating {globus} directory")
os.mkdir(globus)
cert = os.path.join(globus, "usercert.pem")
key = os.path.join(globus, "userkey.pem")
nowPrefix = "." + datetime.now().isoformat()
for old in [cert, key]:
if os.path.isfile(old):
gLogger.notice(f"Back up {old} file to {old + nowPrefix}.")
shutil.move(old, old + nowPrefix)
    # Newer OpenSSL versions require OPENSSL_CONF to point to an accessible location
gLogger.notice("Converting p12 key to pem format")
result = shellCall(900, f"export OPENSSL_CONF=/tmp && openssl pkcs12 -nocerts -in {p12} -out {key}")
    # Proceed only if the key conversion succeeded
if result["OK"] and result["Value"][0] == 0:
gLogger.notice("Converting p12 certificate to pem format")
result = shellCall(900, f"export OPENSSL_CONF=/tmp && openssl pkcs12 -clcerts -nokeys -in {p12} -out {cert}")
# Something went wrong
if not result["OK"] or result["Value"][0] != 0:
gLogger.fatal(result.get("Message", result["Value"][2]))
for old in [cert, key]:
if os.path.isfile(old + nowPrefix):
gLogger.notice(f"Restore {old} file from the {old + nowPrefix}")
shutil.move(old + nowPrefix, old)
sys.exit(1)
os.chmod(key, 0o400)
os.chmod(cert, 0o644)
gLogger.notice(f"{os.path.basename(cert)} and {os.path.basename(key)} was created in the {globus}")
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/scripts/dirac_cert_convert.py
|
Python
|
gpl-3.0
| 2,285
|
[
"DIRAC"
] |
56a17d30e0af4975fb325097035458d96550ffbec88d1192408af485392c0b96
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
    >>> np.poly((np.random.random(), 0, np.random.random()))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
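# The loop above assembles the coefficients by repeatedly convolving with the
# linear factors [1, -root]. E.g. for roots (0, 0, 0) it convolves [1] with
# [1, 0] three times, giving [1, 0, 0, 0] -- the z**3 of the docstring
# example.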
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
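# One step of the recursion above, for p = [1, 1, 1] (i.e. x**2 + x + 1):
# dividing by arange(3, 0, -1) = [3, 2, 1] gives [1/3, 1/2, 1], and appending
# the constant k[0] = 0 yields [0.33333333, 0.5, 1., 0.], matching the
# docstring example.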
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
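# The derivative is taken term by term: y = p[:-1] * [n, n-1, ..., 1]. For
# p = [1, 1, 1, 1] (a cubic) this is [1, 1, 1] * [3, 2, 1] = [3, 2, 1],
# matching the docstring example.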
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool, optional
        Return the estimate and the covariance matrix of the estimate.
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
https://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
if len(x) <= order + 2:
raise ValueError("the number of data points must exceed order + 2 "
"for Bayesian estimate the covariance matrix")
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
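# Note on the covariance branch above: Vbase is the inverse normal matrix
# (lhs.T @ lhs)**-1 of the column-scaled Vandermonde system, mapped back to
# the original scaling through the outer product of `scale`, then multiplied
# by the residual-based variance factor fac = resids / (len(x) - order - 2).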
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
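# The loop above is Horner's scheme: p[0]*x**(N-1) + ... + p[N-1] is computed
# as (...(p[0]*x + p[1])*x + ...) + p[N-1]. For p = [3, 0, 1] at x = 5:
# ((0*5 + 3)*5 + 0)*5 + 1 == 76, matching the docstring example.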
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.astype(w.dtype)
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
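# A trace of the loop above for the docstring example, u = [3., 5., 2.] and
# v = [2., 1.] (so scale = 0.5): k=0 sets q[0] = 1.5 and r = [0., 3.5, 2.];
# k=1 sets q[1] = 1.75 and r = [0., 0., 0.25]. The while loop then strips the
# leading zeros, returning (q, r) = ([1.5, 1.75], [0.25]).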
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
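# _raise_power rewrites the inline '**<power>' markers produced by __str__
# into the two-line raised-exponent layout seen in the poly1d docstring
# examples, e.g. '1 x**2 + 2 x + 3' becomes (approximately):
#      2
#  1 x + 2 x + 3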
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" A copy of the polynomial coefficients """
return self._coeffs.copy()
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
# our internal _coeffs property need to be backed by __dict__['coeffs'] for
# scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
|
Eric89GXL/numpy
|
numpy/lib/polynomial.py
|
Python
|
bsd-3-clause
| 38,469
|
[
"Gaussian"
] |
44ac9e359158cfe72ccd8c8c57d0bbee679f646fcb056d2aa042b29a2d8679b7
|
"""
Views for the verification flow
"""
import decimal
import json
import logging
import urllib
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles.storage import staticfiles_storage
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.timezone import now
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View
from edx_rest_api_client.exceptions import SlumberBaseException
from ipware.ip import get_client_ip
from opaque_keys.edx.keys import CourseKey
from rest_framework.response import Response
from rest_framework.views import APIView
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.track import segment
from common.djangoapps.util.db import outer_atomic
from common.djangoapps.util.json_request import JsonResponse
from common.djangoapps.util.views import require_global_staff
from lms.djangoapps.commerce.utils import EcommerceService, is_account_activation_requirement_disabled
from lms.djangoapps.verify_student.emails import send_verification_approved_email, send_verification_confirmation_email
from lms.djangoapps.verify_student.image import InvalidImageData, decode_image_data
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification, VerificationDeadline
from lms.djangoapps.verify_student.tasks import send_verification_status_email
from lms.djangoapps.verify_student.utils import can_verify_now
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.embargo import api as embargo_api
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.log_utils import audit_log
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from .services import IDVerificationService
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
"""
View for the "verify and pay" flow.
This view is somewhat complicated, because the user
can enter it from a number of different places:
* From the "choose your track" page.
* After completing payment.
* From the dashboard in order to complete verification.
* From the dashboard in order to upgrade to a verified track.
The page will display different steps and requirements
depending on:
* Whether the user has submitted a photo verification recently.
* Whether the user has paid for the course.
* How the user reached the page (mostly affects messaging)
We are also super-paranoid about how users reach this page.
If they somehow aren't enrolled, or the course doesn't exist,
or they've unenrolled, or they've already paid/verified,
... then we try to redirect them to the page with the
most appropriate messaging (including the dashboard).
Note that this page does NOT handle re-verification
(photo verification that was denied or had an error);
that is handled by the "reverify" view.
"""
# Step definitions
#
# These represent the numbered steps a user sees in
# the verify / payment flow.
#
# Steps can either be:
# - displayed or hidden
# - complete or incomplete
#
# For example, when a user enters the verification/payment
# flow for the first time, the user will see steps
# for both payment and verification. As the user
# completes these steps (for example, submitting a photo)
# the steps will be marked "complete".
#
# If a user has already verified for another course,
# then the verification steps will be hidden,
# since the user has already completed them.
#
# If a user re-enters the flow from another application
# (for example, after completing payment through
# a third-party payment processor), then the user
# will resume the flow at an intermediate step.
#
INTRO_STEP = 'intro-step'
MAKE_PAYMENT_STEP = 'make-payment-step'
FACE_PHOTO_STEP = 'face-photo-step'
ID_PHOTO_STEP = 'id-photo-step'
REVIEW_PHOTOS_STEP = 'review-photos-step'
ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
ALL_STEPS = [
INTRO_STEP,
MAKE_PAYMENT_STEP,
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
PAYMENT_STEPS = [
MAKE_PAYMENT_STEP,
]
VERIFICATION_STEPS = [
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
# These steps can be skipped using the ?skip-first-step GET param
SKIP_STEPS = [
INTRO_STEP,
]
STEP_TITLES = {
INTRO_STEP: gettext_lazy("Intro"),
MAKE_PAYMENT_STEP: gettext_lazy("Make payment"),
FACE_PHOTO_STEP: gettext_lazy("Take photo"),
ID_PHOTO_STEP: gettext_lazy("Take a photo of your ID"),
REVIEW_PHOTOS_STEP: gettext_lazy("Review your info"),
ENROLLMENT_CONFIRMATION_STEP: gettext_lazy("Enrollment confirmation"),
}
# Messages
#
    # Depending on how the user reached the page,
# we will display different text messaging.
# For example, we show users who are upgrading
# slightly different copy than users who are verifying
# for the first time.
#
FIRST_TIME_VERIFY_MSG = 'first-time-verify'
VERIFY_NOW_MSG = 'verify-now'
VERIFY_LATER_MSG = 'verify-later'
UPGRADE_MSG = 'upgrade'
# Requirements
#
# These explain to the user what he or she
# will need to successfully pay and/or verify.
#
# These are determined by the steps displayed
# to the user; for example, if the user does not
# need to complete the verification steps,
# then the photo ID and webcam requirements are hidden.
#
ACCOUNT_ACTIVATION_REQ = "account-activation-required"
PHOTO_ID_REQ = "photo-id-required"
WEBCAM_REQ = "webcam-required"
STEP_REQUIREMENTS = {
ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
FACE_PHOTO_STEP: [WEBCAM_REQ],
}
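    # For example, a user who has already verified sees only the payment
    # steps, so neither FACE_PHOTO_STEP nor ID_PHOTO_STEP is displayed and
    # _requirements() surfaces no photo-ID or webcam requirement for the flow
    # (only the account-activation requirement, when applicable).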
# Deadline types
VERIFICATION_DEADLINE = "verification"
UPGRADE_DEADLINE = "upgrade"
def _get_user_active_status(self, user):
"""
Returns the user's active status to the caller
Overrides the actual value if account activation has been disabled via waffle switch
Arguments:
user (User): Current user involved in the onboarding/verification flow
"""
return user.is_active or is_account_activation_requirement_disabled()
@method_decorator(login_required)
def get( # lint-amnesty, pylint: disable=too-many-statements
self, request, course_id,
always_show_payment=False,
current_step=None,
message=FIRST_TIME_VERIFY_MSG
):
"""
Render the payment and verification flow.
Arguments:
request (HttpRequest): The request object.
course_id (unicode): The ID of the course the user is trying
to enroll in.
Keyword Arguments:
always_show_payment (bool): If True, show the payment steps
even if the user has already paid. This is useful
for users returning to the flow after paying.
current_step (string): The current step in the flow.
message (string): The messaging to display.
Returns:
HttpResponse
Raises:
Http404: The course does not exist or does not
have a verified mode.
"""
# Parse the course key
# The URL regex should guarantee that the key format is valid.
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warning("Could not find course with ID %s.", course_id)
raise Http404
# Check whether the user has access to this course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_client_ip(request)[0],
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# If the verification deadline has passed
# then show the user a message that he/she can't verify.
#
# We're making the assumptions (enforced in Django admin) that:
#
# 1) Only verified modes have verification deadlines.
#
# 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
# let someone upgrade into a verified track if they can't complete verification?
#
verification_deadline = VerificationDeadline.deadline_for_course(course.id)
response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
if response is not None:
log.info("Verification deadline for '%s' has passed.", course.id)
return response
# Retrieve the relevant course mode for the payment/verification flow.
#
# WARNING: this is technical debt! A much better way to do this would be to
# separate out the payment flow and use the product SKU to figure out what
# the user is trying to purchase.
#
# Nonetheless, for the time being we continue to make the really ugly assumption
# that at some point there was a paid course mode we can query for the price.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment/verification, so respond with a 404.
if relevant_course_mode is not None:
if CourseMode.is_verified_mode(relevant_course_mode):
log.info(
"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
request.user.id, course_id, current_step
)
else:
log.info(
"Entering payment flow for user '%s', course '%s', with current step '%s'",
request.user.id, course_id, current_step
)
else:
# Otherwise, there has never been a verified/paid mode,
# so return a page not found response.
log.warning(
"No paid/verified course mode found for course '%s' for verification/payment flow request",
course_id
)
raise Http404
# If the user is trying to *pay* and the upgrade deadline has passed,
# then they shouldn't be able to enter the flow.
#
# NOTE: This should match the availability dates used by the E-Commerce service
# to determine whether a user can purchase a product. The idea is that if the service
# won't fulfill the order, we shouldn't even let the user get into the payment flow.
#
user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
if user_is_trying_to_pay:
upgrade_deadline = relevant_course_mode.expiration_datetime
response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
if response is not None:
log.info("Upgrade deadline for '%s' has passed.", course.id)
return response
# Check whether the user has verified, paid, and enrolled.
# A user is considered "paid" if he or she has an enrollment
# with a paid course mode (such as "verified").
# For this reason, every paid user is enrolled, but not
# every enrolled user is paid.
        # If the course mode is not verified (i.e. only paid) then already_verified is always True
already_verified = (
self._check_already_verified(request.user)
if CourseMode.is_verified_mode(relevant_course_mode)
else True
)
already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment / verification status.
sku_to_use = relevant_course_mode.sku
purchase_workflow = request.GET.get('purchase_workflow', 'single')
if purchase_workflow == 'bulk' and relevant_course_mode.bulk_sku:
sku_to_use = relevant_course_mode.bulk_sku
redirect_response = self._redirect_if_necessary(
message,
already_verified,
already_paid,
is_enrolled,
course_key,
user_is_trying_to_pay,
request.user,
sku_to_use
)
if redirect_response is not None:
return redirect_response
display_steps = self._display_steps(
always_show_payment,
already_verified,
already_paid,
relevant_course_mode
)
# Override the actual value if account activation has been disabled
# Also see the reference to this parameter in context dictionary further down
user_is_active = self._get_user_active_status(request.user)
requirements = self._requirements(display_steps, user_is_active)
if current_step is None:
current_step = display_steps[0]['name']
# Allow the caller to skip the first page
# This is useful if we want the user to be able to
# use the "back" button to return to the previous step.
# This parameter should only work for known skip-able steps
if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
display_step_names = [step['name'] for step in display_steps]
current_step_idx = display_step_names.index(current_step)
if (current_step_idx + 1) < len(display_steps):
current_step = display_steps[current_step_idx + 1]['name']
courseware_url = ""
if not course.start or course.start < now():
courseware_url = reverse(
'course_root',
kwargs={'course_id': str(course_key)}
)
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# If the user set a contribution amount on another page,
# use that amount to pre-fill the price selection form.
contribution_amount = request.session.get(
'donation_for_course', {}
).get(str(course_key), '')
# Remember whether the user is upgrading
# so we can fire an analytics event upon payment.
request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
# Determine the photo verification status
verification_good_until = self._verification_valid_until(request.user)
# get available payment processors
processors = ecommerce_api_client(request.user).payment.processors.get()
# Render the top-level page
context = {
'contribution_amount': contribution_amount,
'course': course,
'course_key': str(course_key),
'checkpoint_location': request.GET.get('checkpoint'),
'course_mode': relevant_course_mode,
'courseware_url': courseware_url,
'current_step': current_step,
'disable_courseware_js': True,
'display_steps': display_steps,
'is_active': json.dumps(user_is_active),
'user_email': request.user.email,
'message_key': message,
'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
'processors': processors,
'requirements': requirements,
'user_full_name': full_name,
'verification_deadline': verification_deadline or "",
'already_verified': already_verified,
'verification_good_until': verification_good_until,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
'nav_hidden': True,
'is_ab_testing': 'begin-flow' in request.path,
}
return render_to_response("verify_student/pay_and_verify.html", context)
def add_utm_params_to_url(self, url): # lint-amnesty, pylint: disable=missing-function-docstring
# utm_params is [(u'utm_content', u'course-v1:IDBx IDB20.1x 1T2017'),...
utm_params = [item for item in self.request.GET.items() if 'utm_' in item[0]]
# utm_params is utm_content=course-v1%3AIDBx+IDB20.1x+1T2017&...
utm_params = urllib.parse.urlencode(utm_params, True)
# utm_params is utm_content=course-v1:IDBx+IDB20.1x+1T2017&...
# (course-keys do not have url encoding)
utm_params = urllib.parse.unquote(utm_params)
if utm_params:
if '?' in url:
url = url + '&' + utm_params
else:
url = url + '?' + utm_params
return url
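    # A quick illustration of the forwarding behavior above (hypothetical
    # querystring values, not taken from a real flow): for a request carrying
    # "?utm_source=newsletter&utm_campaign=spring&page=2", only the utm_* pairs
    # survive the filter, so
    #     self.add_utm_params_to_url("https://lms.example.com/checkout")
    # would return
    #     "https://lms.example.com/checkout?utm_source=newsletter&utm_campaign=spring"
    # while a URL that already has a querystring gets "&" instead of "?".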
def _redirect_if_necessary(
self, message, already_verified, already_paid, is_enrolled, course_key,
user_is_trying_to_pay, user, sku
):
"""Redirect the user to a more appropriate page if necessary.
In some cases, a user may visit this page with
verification / enrollment / payment state that
we don't anticipate. For example, a user may unenroll
from the course after paying for it, then visit the
"verify now" page to complete verification.
When this happens, we try to redirect the user to
the most appropriate page.
Arguments:
message (string): The messaging of the page. Should be a key
in `MESSAGES`.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_enrolled (bool): Whether the user has an active enrollment
in the course.
            course_key (CourseKey): The key for the course.
            user_is_trying_to_pay (bool): Whether the user entered the flow
                intending to pay.
            user (User): The user visiting the page.
            sku (str): The SKU of the course mode the user would purchase.
Returns:
HttpResponse or None
"""
url = None
course_kwargs = {'course_id': str(course_key)}
if already_verified and already_paid:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
url = reverse('dashboard')
elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG]:
if is_enrolled:
# If the user is already enrolled but hasn't yet paid,
# then the "upgrade" messaging is more appropriate.
if not already_paid:
url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
else:
                # If the user is NOT enrolled, then send them
                # to the first-time verification page.
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
elif message == self.UPGRADE_MSG:
if is_enrolled:
if already_paid:
# If the student has paid, but not verified, redirect to the verification flow.
url = IDVerificationService.get_verify_location(str(course_key))
else:
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
if user_is_trying_to_pay and self._get_user_active_status(user) and not already_paid:
            # If the user is trying to pay, has activated their account, and the ecommerce service
            # is enabled, redirect them to the ecommerce checkout page.
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(user):
url = ecommerce_service.get_checkout_page_url(
sku,
catalog=self.request.GET.get('catalog')
)
# Redirect if necessary, otherwise implicitly return None
if url is not None:
url = self.add_utm_params_to_url(url)
return redirect(url)
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for a course.
The returned course mode may or may not be expired.
Unexpired modes are preferred to expired modes.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first mode that matches the following criteria:
# * Unexpired
# * Price > 0
# * Not credit
for mode in unexpired_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
        # Otherwise, find the first non-credit expired paid mode.
for mode in all_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
        # Otherwise, return None so the view knows to respond with a 404.
return None
def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
"""Determine which steps to display to the user.
Includes all steps by default, but removes steps
if the user has already completed them.
Arguments:
always_show_payment (bool): If True, display the payment steps
even if the user has already paid.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
Returns:
list
"""
display_steps = self.ALL_STEPS
remove_steps = set()
if already_verified or not CourseMode.is_verified_mode(course_mode):
remove_steps |= set(self.VERIFICATION_STEPS)
if already_paid and not always_show_payment:
remove_steps |= set(self.PAYMENT_STEPS)
else:
# The "make payment" step doubles as an intro step,
# so if we're showing the payment step, hide the intro step.
remove_steps |= {self.INTRO_STEP}
return [
{
'name': step,
'title': str(self.STEP_TITLES[step]),
}
for step in display_steps
if step not in remove_steps
]
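    # Sketch of the shape _display_steps returns (the step names below are
    # illustrative placeholders, not necessarily the real ALL_STEPS constants):
    #     [
    #         {'name': 'make-payment', 'title': 'Make payment'},
    #         {'name': 'face-photo', 'title': 'Take photo'},
    #     ]
    # i.e. an ordered list of {'name', 'title'} dicts, filtered down to the
    # steps the user still needs to complete.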
def _requirements(self, display_steps, is_active):
"""Determine which requirements to show the user.
For example, if the user needs to submit a photo
verification, tell the user that she will need
a photo ID and a webcam.
Arguments:
display_steps (list): The steps to display to the user.
is_active (bool): If False, adds a requirement to activate the user account.
Returns:
dict: Keys are requirement names, values are booleans
indicating whether to show the requirement.
"""
all_requirements = {
self.ACCOUNT_ACTIVATION_REQ: not is_active,
self.PHOTO_ID_REQ: False,
self.WEBCAM_REQ: False,
}
# Remove the account activation requirement if disabled via waffle
if is_account_activation_requirement_disabled():
all_requirements.pop(self.ACCOUNT_ACTIVATION_REQ)
display_steps = {step['name'] for step in display_steps}
for step, step_requirements in self.STEP_REQUIREMENTS.items():
if step in display_steps:
for requirement in step_requirements:
all_requirements[requirement] = True
return all_requirements
def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
"""
Check whether the user has a valid or pending verification.
Arguments:
            user (User): The user whose verification to check.
            date_format (str): Optional format string for converting the
                datetime object to a string in the response.
        Returns:
            str: The expiration datetime as a formatted string, or an empty
                string if no verification is found.
"""
expiration_datetime = IDVerificationService.get_expiration_datetime(
user, ['submitted', 'approved', 'must_retry']
)
# return 'expiration_datetime' of latest photo verification if found,
# otherwise implicitly return ''
if expiration_datetime:
return expiration_datetime.strftime(date_format)
return ''
def _check_already_verified(self, user):
"""Check whether the user has a valid or pending verification.
Note that this includes cases in which the user's verification
has not been accepted (either because it hasn't been processed,
or there was an error).
This should return True if the user has done their part:
submitted photos within the expiration period.
"""
return IDVerificationService.user_has_valid_or_pending(user)
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
            Tuple `(has_paid, is_active)` indicating whether the user
            has paid and whether the user has an active enrollment.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
"""
Respond with some error messaging if the deadline has passed.
Arguments:
course (Course): The course the user is trying to enroll in.
deadline_name (str): One of the deadline constants.
deadline_datetime (datetime): The deadline.
Returns: HttpResponse or None
"""
if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
return None
deadline_passed = (
deadline_datetime is not None and
deadline_datetime < now()
)
if deadline_passed:
context = {
'course': course,
'deadline_name': deadline_name,
'deadline': deadline_datetime
}
return render_to_response("verify_student/missed_deadline.html", context)
def checkout_with_ecommerce_service(user, course_key, course_mode, processor):
""" Create a new basket and trigger immediate checkout, using the E-Commerce API. """
course_id = str(course_key)
try:
api = ecommerce_api_client(user)
# Make an API call to create the order and retrieve the results
result = api.baskets.post({
'products': [{'sku': course_mode.sku}],
'checkout': True,
'payment_processor_name': processor
})
# Pass the payment parameters directly from the API response.
return result.get('payment_data')
except SlumberBaseException:
params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params)
raise
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=course_mode.slug,
processor_name=processor,
user_id=user.id
)
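# Hedged usage sketch for the helper above (the mode and processor name are
# hypothetical, and the exact shape of 'payment_data' depends on the payment
# processor configured in the E-Commerce service):
#
#     payment_data = checkout_with_ecommerce_service(
#         request.user, course_key, verified_mode, "cybersource"
#     )
#     # 'payment_data' is whatever the basket-creation response carried under
#     # its 'payment_data' key.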
@require_POST
@login_required
def create_order(request):
"""
This endpoint is named 'create_order' for backward compatibility, but its
actual use is to add a single product to the user's cart and request
immediate checkout.
"""
course_id = request.POST['course_id']
course_id = CourseKey.from_string(course_id)
donation_for_course = request.session.get('donation_for_course', {})
contribution = request.POST.get("contribution", donation_for_course.get(str(course_id), 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
current_mode = None
sku = request.POST.get('sku', None)
if sku:
try:
current_mode = CourseMode.objects.get(sku=sku)
except CourseMode.DoesNotExist:
log.exception('Failed to find CourseMode with SKU [%s].', sku)
if not current_mode:
        # If more than one paid mode exists for the course (min_price > 0,
        # e.g. verified/professional/no-id-professional), choose the first one.
paid_modes = CourseMode.paid_modes_for_course(course_id)
if paid_modes:
if len(paid_modes) > 1:
log.warning("Multiple paid course modes found for course '%s' for create order request", course_id)
current_mode = paid_modes[0]
# Make sure this course has a paid mode
if not current_mode:
log.warning("Create order requested for course '%s' without a paid mode.", course_id)
return HttpResponseBadRequest(_("This course doesn't support paid certificates"))
if CourseMode.is_professional_mode(current_mode):
amount = current_mode.min_price
if amount < current_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
# if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
payment_data = checkout_with_ecommerce_service(
request.user,
course_id,
current_mode,
request.POST.get('processor')
)
if 'processor' not in request.POST:
# (XCOM-214) To be removed after release.
# the absence of this key in the POST payload indicates that the request was initiated from
# a stale js client, which expects a response containing only the 'payment_form_data' part of
# the payment data result.
payment_data = payment_data['payment_form_data']
return JsonResponse(payment_data)
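# A minimal sketch of how a client might exercise the endpoint above, assuming
# Django's test client and a hypothetical URL and SKU (all values illustrative):
#
#     from django.test import Client
#     client = Client()
#     client.login(username="student", password="secret")
#     response = client.post("/verify_student/create_order/", {
#         "course_id": "course-v1:Org+Course+Run",
#         "contribution": "49.00",
#         "sku": "ABC123",
#     })
#     # On success, the JSON body holds the processor's payment parameters.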
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(transaction.non_atomic_requests)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@method_decorator(login_required)
@method_decorator(outer_atomic())
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
experiment_name (str): The name of an A/B experiment associated with this attempt
portrait_photo_mode (str): The mode in which the portrait photo was taken
id_photo_mode (str): The mode in which the id photo was taken
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
full_name = None
if "full_name" in params:
full_name = params["full_name"]
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a re-verification.
face_image, photo_id_image, response = self._validate_and_decode_image_data(
request, params["face_image"], params.get("photo_id_image")
)
        # If we have a photo ID image, we do not want to use the initial verification image.
if photo_id_image is not None:
initial_verification = None
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification, full_name)
# Send event to segment for analyzing A/B testing data
data = {
"attempt_id": attempt.id,
"experiment_name": params.get("experiment_name", "original")
}
self._fire_event(request.user, "edx.bi.experiment.verification.attempt", data)
if params.get("portrait_photo_mode"):
mode_data = {
"attempt_id": attempt.id,
"portrait_photo_mode": params.get("portrait_photo_mode"),
"id_photo_mode": params.get("id_photo_mode")
}
self._fire_event(request.user, "edx.bi.experiment.verification.attempt.photo.mode", mode_data)
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
return JsonResponse({})
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
HttpResponse or None
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"full_name",
"experiment_name",
"portrait_photo_mode",
"id_photo_mode"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
log.error(("User {user_id} missing required parameter face_image").format(user_id=request.user.id))
return None, HttpResponseBadRequest(msg)
return params, None
def _validate_and_decode_image_data(self, request, face_data, photo_id_data=None):
"""
Validate and decode image data sent with the request.
Arguments:
face_data (str): base64-encoded face image data.
Keyword Arguments:
photo_id_data (str): base64-encoded photo ID image data.
Returns:
tuple of (str, str, HttpResponse)
"""
for image_data in [face_data, photo_id_data]:
# Validate that the media type is image
if image_data and not image_data.startswith('data:image'):
msg = _("Image data is in an unsupported format.")
data_type = image_data.split(',')[0]
if data_type:
log.error(
"Image data for user_id={user_id} was uploaded in an unsupported "
"format: {data_type}".format(user_id=request.user.id, data_type=data_type)
)
else:
log.error(
"Image data type for user_id={user_id} could not be identified.".format(
user_id=request.user.id
)
)
return None, None, HttpResponseBadRequest(msg)
try:
# Decode face image data (used for both an initial and re-verification)
face_image = decode_image_data(face_data)
# Decode the photo ID image data if it's provided
photo_id_image = (
decode_image_data(photo_id_data)
if photo_id_data is not None else None
)
return face_image, photo_id_image, None
except InvalidImageData:
msg = _("Image data is not valid.")
log.error(("Image data for user {user_id} is not valid").format(user_id=request.user.id))
return None, None, HttpResponseBadRequest(msg)
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None, provided_name=None):
"""
Submit a verification attempt.
Arguments:
user (User): The user making the attempt.
face_image (str): Decoded face image data.
Keyword Arguments:
photo_id_image (str or None): Decoded photo ID image data.
initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt.
provided_name (str or None): full name given by user for this attempt
"""
attempt = SoftwareSecurePhotoVerification(user=user)
if provided_name:
attempt.name = provided_name
# We will always have face image data, so upload the face image
attempt.upload_face_image(face_image)
# If an ID photo wasn't submitted, re-use the ID photo from the initial attempt.
# Earlier validation rules ensure that at least one of these is available.
if photo_id_image is not None:
attempt.upload_photo_id_image(photo_id_image)
elif initial_verification is None:
# Earlier validation should ensure that we never get here.
log.error(
"Neither a photo ID image or initial verification attempt provided. "
"Parameter validation in the view should prevent this from happening!"
)
# Submit the attempt
attempt.mark_ready()
attempt.submit(copy_id_photo_from=initial_verification)
return attempt
def _send_confirmation_email(self, user):
"""
Send an email confirming that the user submitted photos
for initial verification.
"""
lms_root_url = configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL)
context = {
'user': user,
'dashboard_link': '{}{}'.format(lms_root_url, reverse('dashboard'))
}
return send_verification_confirmation_email(context)
def _fire_event(self, user, event_name, parameters):
"""
Fire an analytics event.
Arguments:
user (User): The user who submitted photos.
event_name (str): Name of the analytics event.
parameters (dict): Event parameters.
Returns: None
"""
segment.track(user.id, event_name, parameters)
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request): # lint-amnesty, pylint: disable=too-many-statements
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body.decode('utf-8'))
except ValueError:
log.exception(f"Invalid JSON received from Software Secure:\n\n{body}\n")
return HttpResponseBadRequest(f"Invalid JSON. Received:\n\n{body}")
if not isinstance(body_dict, dict):
log.error(f"Reply from Software Secure is not a dict:\n\n{body}\n")
return HttpResponseBadRequest(f"JSON should be dict. Received:\n\n{body}")
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
# if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
return HttpResponseBadRequest(f"edX ID {receipt_id} not found")
user = attempt.user
verification_status_email_vars = {
'platform_name': settings.PLATFORM_NAME,
}
if result == "PASS":
        # If this verification is not an outdated version, then set the expiry email date
        # of the previous approved verification to NULL. Setting the expiry email date to
        # NULL is important so that it does not get picked up by the management command
        # that sends email when verification expires: verify_student/send_verification_expiry_email
if attempt.status != 'approved':
verification = SoftwareSecurePhotoVerification.objects.filter(status='approved', user_id=attempt.user_id)
if verification:
log.info(f'Making expiry email date of previous approved verification NULL for {attempt.user_id}') # lint-amnesty, pylint: disable=line-too-long
# The updated_at field in sspv model has auto_now set to True, which means any time save() is called on
# the model instance, `updated_at` will change. Some of the existing functionality of verification
# (showing your verification has expired on dashboard) relies on updated_at.
                # To avoid causing any inconsistencies in existing functionality if
                # attempt.approve() fails for some reason, update() is called instead of save().
previous_verification = verification.latest('updated_at')
                SoftwareSecurePhotoVerification.objects.filter(
                    pk=previous_verification.pk
                ).update(expiry_email_date=None)
log.debug(f'Approving verification for {receipt_id}')
attempt.approve()
expiration_datetime = attempt.expiration_datetime.date()
if settings.VERIFY_STUDENT.get('USE_DJANGO_MAIL'):
verification_status_email_vars['expiration_datetime'] = expiration_datetime.strftime("%m/%d/%Y")
verification_status_email_vars['full_name'] = user.profile.name
subject = _("Your {platform_name} ID verification was approved!").format(
platform_name=settings.PLATFORM_NAME
)
context = {
'subject': subject,
'template': 'emails/passed_verification_email.txt',
'email': user.email,
'email_vars': verification_status_email_vars
}
send_verification_status_email.delay(context)
else:
email_context = {'user': user, 'expiration_datetime': expiration_datetime.strftime("%m/%d/%Y")}
send_verification_approved_email(context=email_context)
elif result == "FAIL":
log.debug("Denying verification for %s", receipt_id)
attempt.deny(json.dumps(reason), error_code=error_code)
reverify_url = f'{settings.ACCOUNT_MICROFRONTEND_URL}/id-verification'
verification_status_email_vars['reasons'] = reason
verification_status_email_vars['reverify_url'] = reverify_url
verification_status_email_vars['faq_url'] = settings.ID_VERIFICATION_SUPPORT_LINK
subject = _("Your {platform_name} Verification Has Been Denied").format(
platform_name=settings.PLATFORM_NAME
)
context = {
'subject': subject,
'template': 'emails/failed_verification_email.txt',
'email': user.email,
'email_vars': verification_status_email_vars
}
send_verification_status_email.delay(context)
elif result == "SYSTEM FAIL":
log.debug("System failure for %s -- resetting to must_retry", receipt_id)
attempt.system_error(json.dumps(reason), error_code=error_code)
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result %s", result)
return HttpResponseBadRequest(
f"Result {result} not understood. Known results: PASS, FAIL, SYSTEM FAIL"
)
# Send event to segment for analyzing A/B testing data
data = {
"attempt_id": attempt.id,
"result": result
}
segment.track(attempt.user.id, "edx.bi.experiment.verification.attempt.result", data)
return HttpResponse("OK!")
class VerificationStatusAPIView(APIView):
"""
GET /verify_student/status/
Parameters: None
Returns:
200 OK
{
"status": String,
"expires": String,
"can_verify": Boolean
}
Notes:
* "status" is a verification status string, or "none" if there is none.
* Verification should be allowed if and only if "can_verify" is true.
* If there is a current verification, then "expires" is a ISO datetime string.
* Otherwise, "expires" is omitted.
"""
@method_decorator(login_required)
def get(self, request):
"""
Handle the GET request.
"""
verification_status = IDVerificationService.user_status(request.user)
expiration_datetime = IDVerificationService.get_expiration_datetime(request.user, ['approved'])
can_verify = can_verify_now(verification_status, expiration_datetime)
data = {
'status': verification_status['status'],
'can_verify': can_verify,
}
if expiration_datetime:
data['expires'] = expiration_datetime
return Response(data)
class ReverifyView(View):
"""
Reverification occurs when a user's initial verification is denied
or expires. When this happens, users can re-submit photos through
the re-verification flow.
Unlike in-course reverification, this flow requires users to submit
*both* face and ID photos. In contrast, during in-course reverification,
students submit only face photos, which are matched against the ID photo
the user submitted during initial verification.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render the reverification flow.
Most of the work is done client-side by composing the same
Backbone views used in the initial verification flow.
"""
IDV_workflow = IDVerificationService.get_verify_location()
return redirect(IDV_workflow)
class PhotoUrlsView(APIView):
"""
This can be used to help debug IDV photos
"""
@method_decorator(require_global_staff)
def get(self, request, receipt_id):
"""
Endpoint for retrieving photo urls for IDV
GET /verify_student/photo-urls/{receipt_id}
Returns:
200 OK
{
"EdX-ID": receipt_id,
"ExpectedName": user profile name,
"PhotoID": id photo S3 url,
"PhotoIDKey": encrypted photo id key,
"UserPhoto": face photo S3 url,
"UserPhotoKey": encrypted user photo key,
}
"""
verification = SoftwareSecurePhotoVerification.get_verification_from_receipt(receipt_id)
if verification:
_, body = verification.create_request()
# remove this key, as it isn't needed
body.pop('SendResponseTo')
return Response(body)
log.warning("Could not find verification with receipt ID %s.", receipt_id)
raise Http404
class DecryptFaceImageView(APIView):
"""
Endpoint to retrieve decrypted IDV face image data. Can only be used on stage.
"""
@method_decorator(require_global_staff)
def get(self, request, receipt_id):
"""
Endpoint used for decrypting images on stage based on a given receipt ID
GET /verify_student/decrypt-idv-images/face/{receipt_id}
Returns:
200 OK
{
img
}
"""
# if this endpoint is not being accessed on stage, raise a 403. Only stage will have an RSA_PRIVATE_KEY
if not settings.VERIFY_STUDENT["SOFTWARE_SECURE"].get("RSA_PRIVATE_KEY", None):
log.warning("Cannot access image decryption outside of staging environment")
return HttpResponseForbidden()
verification = SoftwareSecurePhotoVerification.get_verification_from_receipt(receipt_id)
if verification:
user_photo = verification.download_face_image()
if user_photo:
return HttpResponse(user_photo, content_type="image/png")
log.warning("Could not decrypt face image for receipt ID %s.", receipt_id)
raise Http404
class DecryptPhotoIDImageView(APIView):
"""
Endpoint to retrieve decrypted IDV photo ID image data. Can only be used on stage.
"""
@method_decorator(require_global_staff)
def get(self, request, receipt_id):
"""
Endpoint used for decrypting images on stage based on a given receipt ID
GET /verify_student/decrypt-idv-images/photo-id/{receipt_id}
Returns:
200 OK
{
img
}
"""
# if this endpoint is not being accessed on stage, raise a 403. Only stage will have an RSA_PRIVATE_KEY
if not settings.VERIFY_STUDENT["SOFTWARE_SECURE"].get("RSA_PRIVATE_KEY", None):
log.warning("Cannot access image decryption outside of staging environment")
return HttpResponseForbidden()
verification = SoftwareSecurePhotoVerification.get_verification_from_receipt(receipt_id)
if verification:
id_photo = verification.download_photo_id_image()
if id_photo:
return HttpResponse(id_photo, content_type="image/png")
log.warning("Could not decrypt photo ID image for receipt ID %s.", receipt_id)
raise Http404
|
arbrandes/edx-platform
|
lms/djangoapps/verify_student/views.py
|
Python
|
agpl-3.0
| 53,468
|
[
"VisIt"
] |
115813f6acdc9d009cc02534e011631bf22ae714338d01692eae8e51123a0f49
|
__author__ = "Xiang-Guo Li"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__email__ = "xil110@eng.ucsd.edu"
__date__ = "07/30/18"
import os
import unittest
import warnings
import numpy as np
from pymatgen.analysis.gb.grain import GrainBoundary, GrainBoundaryGenerator
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "grain_boundary")
class TestGrainBoundary(PymatgenTest):
@classmethod
def setUpClass(cls):
warnings.filterwarnings("ignore")
cls.Cu_conv = Structure.from_file(os.path.join(test_dir, "Cu_mp-30_conventional_standard.cif"))
GB_Cu_conv = GrainBoundaryGenerator(cls.Cu_conv)
cls.Cu_GB1 = GB_Cu_conv.gb_from_parameters(
[1, 2, 3],
123.74898859588858,
expand_times=4,
vacuum_thickness=1.5,
ab_shift=[0.0, 0.0],
plane=[1, 3, 1],
rm_ratio=0.0,
)
cls.Cu_GB2 = GB_Cu_conv.gb_from_parameters(
[1, 2, 3],
123.74898859588858,
expand_times=4,
vacuum_thickness=1.5,
ab_shift=[0.2, 0.2],
rm_ratio=0.0,
)
@classmethod
def tearDownClass(cls):
warnings.simplefilter("default")
def test_init(self):
self.assertAlmostEqual(self.Cu_GB1.rotation_angle, 123.74898859588858)
self.assertAlmostEqual(self.Cu_GB1.vacuum_thickness, 1.5)
self.assertListEqual(self.Cu_GB2.rotation_axis, [1, 2, 3])
self.assertArrayAlmostEqual(np.array(self.Cu_GB1.ab_shift), np.array([0.0, 0.0]))
self.assertArrayAlmostEqual(np.array(self.Cu_GB2.ab_shift), np.array([0.2, 0.2]))
self.assertListEqual(self.Cu_GB1.gb_plane, [1, 3, 1])
self.assertListEqual(self.Cu_GB2.gb_plane, [1, 2, 3])
self.assertArrayAlmostEqual(
np.array(self.Cu_GB1.init_cell.lattice.matrix),
np.array(self.Cu_conv.lattice.matrix),
)
def test_copy(self):
Cu_GB1_copy = self.Cu_GB1.copy()
self.assertAlmostEqual(Cu_GB1_copy.sigma, self.Cu_GB1.sigma)
self.assertAlmostEqual(Cu_GB1_copy.rotation_angle, self.Cu_GB1.rotation_angle)
self.assertListEqual(Cu_GB1_copy.rotation_axis, self.Cu_GB1.rotation_axis)
self.assertListEqual(Cu_GB1_copy.gb_plane, self.Cu_GB1.gb_plane)
self.assertArrayAlmostEqual(Cu_GB1_copy.init_cell.lattice.matrix, self.Cu_GB1.init_cell.lattice.matrix)
self.assertArrayAlmostEqual(
Cu_GB1_copy.oriented_unit_cell.lattice.matrix,
self.Cu_GB1.oriented_unit_cell.lattice.matrix,
)
self.assertArrayAlmostEqual(Cu_GB1_copy.lattice.matrix, self.Cu_GB1.lattice.matrix)
def test_sigma(self):
self.assertAlmostEqual(self.Cu_GB1.sigma, 9)
self.assertAlmostEqual(self.Cu_GB2.sigma, 9)
def test_top_grain(self):
self.assertAlmostEqual(self.Cu_GB1.num_sites, self.Cu_GB1.top_grain.num_sites * 2)
self.assertArrayAlmostEqual(self.Cu_GB1.lattice.matrix, self.Cu_GB1.top_grain.lattice.matrix)
def test_bottom_grain(self):
self.assertAlmostEqual(self.Cu_GB1.num_sites, self.Cu_GB1.bottom_grain.num_sites * 2)
self.assertArrayAlmostEqual(self.Cu_GB1.lattice.matrix, self.Cu_GB1.bottom_grain.lattice.matrix)
def test_coincidents(self):
self.assertAlmostEqual(self.Cu_GB1.num_sites / self.Cu_GB1.sigma, len(self.Cu_GB1.coincidents))
self.assertAlmostEqual(self.Cu_GB2.num_sites / self.Cu_GB2.sigma, len(self.Cu_GB2.coincidents))
def test_as_dict_and_from_dict(self):
d1 = self.Cu_GB1.as_dict()
d2 = self.Cu_GB2.as_dict()
Cu_GB1_new = GrainBoundary.from_dict(d1)
Cu_GB2_new = GrainBoundary.from_dict(d2)
self.assertAlmostEqual(Cu_GB1_new.sigma, self.Cu_GB1.sigma)
self.assertAlmostEqual(Cu_GB1_new.rotation_angle, self.Cu_GB1.rotation_angle)
self.assertListEqual(Cu_GB1_new.rotation_axis, self.Cu_GB1.rotation_axis)
self.assertListEqual(Cu_GB1_new.gb_plane, self.Cu_GB1.gb_plane)
self.assertArrayAlmostEqual(Cu_GB1_new.init_cell.lattice.matrix, self.Cu_GB1.init_cell.lattice.matrix)
self.assertArrayAlmostEqual(
Cu_GB1_new.oriented_unit_cell.lattice.matrix,
self.Cu_GB1.oriented_unit_cell.lattice.matrix,
)
self.assertArrayAlmostEqual(Cu_GB1_new.lattice.matrix, self.Cu_GB1.lattice.matrix)
self.assertAlmostEqual(Cu_GB2_new.sigma, self.Cu_GB2.sigma)
self.assertAlmostEqual(Cu_GB2_new.rotation_angle, self.Cu_GB2.rotation_angle)
self.assertListEqual(Cu_GB2_new.rotation_axis, self.Cu_GB2.rotation_axis)
self.assertListEqual(Cu_GB2_new.gb_plane, self.Cu_GB2.gb_plane)
self.assertArrayAlmostEqual(Cu_GB2_new.init_cell.lattice.matrix, self.Cu_GB2.init_cell.lattice.matrix)
self.assertArrayAlmostEqual(
Cu_GB2_new.oriented_unit_cell.lattice.matrix,
self.Cu_GB2.oriented_unit_cell.lattice.matrix,
)
self.assertArrayAlmostEqual(Cu_GB2_new.lattice.matrix, self.Cu_GB2.lattice.matrix)
class GrainBoundaryGeneratorTest(PymatgenTest):
@classmethod
def setUpClass(cls):
warnings.filterwarnings("ignore")
cls.Cu_prim = Structure.from_file(os.path.join(test_dir, "Cu_mp-30_primitive.cif"))
cls.GB_Cu_prim = GrainBoundaryGenerator(cls.Cu_prim)
cls.Cu_conv = Structure.from_file(os.path.join(test_dir, "Cu_mp-30_conventional_standard.cif"))
cls.GB_Cu_conv = GrainBoundaryGenerator(cls.Cu_conv)
cls.Be = Structure.from_file(os.path.join(test_dir, "Be_mp-87_conventional_standard.cif"))
cls.GB_Be = GrainBoundaryGenerator(cls.Be)
cls.Pa = Structure.from_file(os.path.join(test_dir, "Pa_mp-62_conventional_standard.cif"))
cls.GB_Pa = GrainBoundaryGenerator(cls.Pa)
cls.Br = Structure.from_file(os.path.join(test_dir, "Br_mp-23154_conventional_standard.cif"))
cls.GB_Br = GrainBoundaryGenerator(cls.Br)
cls.Bi = Structure.from_file(os.path.join(test_dir, "Bi_mp-23152_primitive.cif"))
cls.GB_Bi = GrainBoundaryGenerator(cls.Bi)
@classmethod
def tearDownClass(cls):
warnings.simplefilter("default")
def test_gb_from_parameters(self):
        # from fcc primitive cell, axis [1, 2, 3], sigma 9
gb_cu_123_prim1 = self.GB_Cu_prim.gb_from_parameters([1, 2, 3], 123.74898859588858, expand_times=2)
lat_mat1 = gb_cu_123_prim1.lattice.matrix
c_vec1 = np.cross(lat_mat1[0], lat_mat1[1]) / np.linalg.norm(np.cross(lat_mat1[0], lat_mat1[1]))
c_len1 = np.dot(lat_mat1[2], c_vec1)
vol_ratio = gb_cu_123_prim1.volume / self.Cu_prim.volume
self.assertAlmostEqual(vol_ratio, 9 * 2 * 2, 8)
# test expand_times and vacuum layer
gb_cu_123_prim2 = self.GB_Cu_prim.gb_from_parameters(
[1, 2, 3], 123.74898859588858, expand_times=4, vacuum_thickness=1.5
)
lat_mat2 = gb_cu_123_prim2.lattice.matrix
c_vec2 = np.cross(lat_mat2[0], lat_mat2[1]) / np.linalg.norm(np.cross(lat_mat2[0], lat_mat2[1]))
c_len2 = np.dot(lat_mat2[2], c_vec2)
self.assertAlmostEqual((c_len2 - 1.5 * 2) / c_len1, 2)
# test normal
gb_cu_123_prim3 = self.GB_Cu_prim.gb_from_parameters([1, 2, 3], 123.74898859588858, expand_times=2, normal=True)
lat_mat3 = gb_cu_123_prim3.lattice.matrix
c_vec3 = np.cross(lat_mat3[0], lat_mat3[1]) / np.linalg.norm(np.cross(lat_mat3[0], lat_mat3[1]))
ab_len3 = np.linalg.norm(np.cross(lat_mat3[2], c_vec3))
self.assertAlmostEqual(ab_len3, 0)
# test normal in tilt boundary
        # 'np.finfo(np.float32).eps' is the machine epsilon for float32 (the difference
        # between 1.0 and the next representable float), introduced because comparing
        # against exactly zero or one failed the test due to rounding errors.
gb_cu_010_conv1 = self.GB_Cu_conv.gb_from_parameters(
rotation_axis=[0, 1, 0],
rotation_angle=36.8698976458,
expand_times=1,
vacuum_thickness=1.0,
ab_shift=[0.0, 0.0],
rm_ratio=0.0,
plane=[0, 0, 1],
normal=True,
)
self.assertTrue(np.all(-np.finfo(np.float32).eps <= gb_cu_010_conv1.frac_coords))
self.assertTrue(np.all(1 + np.finfo(np.float32).eps >= gb_cu_010_conv1.frac_coords))
        # from fcc conventional cell, axis [1, 2, 3], sigma 9
gb_cu_123_conv1 = self.GB_Cu_conv.gb_from_parameters(
[1, 2, 3], 123.74898859588858, expand_times=4, vacuum_thickness=1.5
)
lat_mat1 = gb_cu_123_conv1.lattice.matrix
self.assertAlmostEqual(np.dot(lat_mat1[0], [1, 2, 3]), 0)
self.assertAlmostEqual(np.dot(lat_mat1[1], [1, 2, 3]), 0)
# test plane
gb_cu_123_conv2 = self.GB_Cu_conv.gb_from_parameters(
[1, 2, 3],
123.74898859588858,
expand_times=2,
vacuum_thickness=1.5,
normal=False,
plane=[1, 3, 1],
)
lat_mat2 = gb_cu_123_conv2.lattice.matrix
self.assertAlmostEqual(np.dot(lat_mat2[0], [1, 3, 1]), 0)
self.assertAlmostEqual(np.dot(lat_mat2[1], [1, 3, 1]), 0)
        # from hex cell, axis [1, 1, 1], sigma 21
gb_Be_111_1 = self.GB_Be.gb_from_parameters(
[1, 1, 1],
147.36310249644626,
ratio=[5, 2],
expand_times=4,
vacuum_thickness=1.5,
plane=[1, 2, 1],
)
lat_priv = self.Be.lattice.matrix
lat_mat1 = np.matmul(gb_Be_111_1.lattice.matrix, np.linalg.inv(lat_priv))
self.assertAlmostEqual(np.dot(lat_mat1[0], [1, 2, 1]), 0)
self.assertAlmostEqual(np.dot(lat_mat1[1], [1, 2, 1]), 0)
# test volume associated with sigma value
gb_Be_111_2 = self.GB_Be.gb_from_parameters([1, 1, 1], 147.36310249644626, ratio=[5, 2], expand_times=4)
vol_ratio = gb_Be_111_2.volume / self.Be.volume
self.assertAlmostEqual(vol_ratio, 19 * 2 * 4)
# test ratio = None, axis [0,0,1], sigma 7
gb_Be_111_3 = self.GB_Be.gb_from_parameters([0, 0, 1], 21.786789298261812, ratio=[5, 2], expand_times=4)
gb_Be_111_4 = self.GB_Be.gb_from_parameters([0, 0, 1], 21.786789298261812, ratio=None, expand_times=4)
self.assertTupleEqual(gb_Be_111_3.lattice.abc, gb_Be_111_4.lattice.abc)
self.assertTupleEqual(gb_Be_111_3.lattice.angles, gb_Be_111_4.lattice.angles)
gb_Be_111_5 = self.GB_Be.gb_from_parameters([3, 1, 0], 180.0, ratio=[5, 2], expand_times=4)
gb_Be_111_6 = self.GB_Be.gb_from_parameters([3, 1, 0], 180.0, ratio=None, expand_times=4)
self.assertTupleEqual(gb_Be_111_5.lattice.abc, gb_Be_111_6.lattice.abc)
self.assertTupleEqual(gb_Be_111_5.lattice.angles, gb_Be_111_6.lattice.angles)
# gb from tetragonal cell, axis[1,1,1], sigma 15
gb_Pa_111_1 = self.GB_Pa.gb_from_parameters(
[1, 1, 1], 151.92751306414706, ratio=[2, 3], expand_times=4, max_search=10
)
vol_ratio = gb_Pa_111_1.volume / self.Pa.volume
self.assertAlmostEqual(vol_ratio, 17 * 2 * 4)
# gb from orthorhombic cell, axis[1,1,1], sigma 83
gb_Br_111_1 = self.GB_Br.gb_from_parameters(
[1, 1, 1],
131.5023374652235,
ratio=[21, 20, 5],
expand_times=4,
max_search=10,
)
vol_ratio = gb_Br_111_1.volume / self.Br.volume
self.assertAlmostEqual(vol_ratio, 83 * 2 * 4)
        # gb from rhombohedral cell, axis [1, 2, 0], sigma 63
gb_Bi_120_1 = self.GB_Bi.gb_from_parameters(
[1, 2, 0], 63.310675060280246, ratio=[19, 5], expand_times=4, max_search=5
)
vol_ratio = gb_Bi_120_1.volume / self.Bi.volume
self.assertAlmostEqual(vol_ratio, 59 * 2 * 4)
def test_get_ratio(self):
        # hexagonal
Be_ratio = self.GB_Be.get_ratio(max_denominator=2)
self.assertListEqual(Be_ratio, [5, 2])
Be_ratio = self.GB_Be.get_ratio(max_denominator=5)
self.assertListEqual(Be_ratio, [12, 5])
# tetragonal
Pa_ratio = self.GB_Pa.get_ratio(max_denominator=5)
self.assertListEqual(Pa_ratio, [2, 3])
        # orthorhombic
Br_ratio = self.GB_Br.get_ratio(max_denominator=5)
self.assertListEqual(Br_ratio, [21, 20, 5])
        # rhombohedral
Bi_ratio = self.GB_Bi.get_ratio(max_denominator=5)
self.assertListEqual(Bi_ratio, [19, 5])
def test_enum_sigma_cubic(self):
true_100 = [5, 13, 17, 25, 29, 37, 41]
true_110 = [3, 9, 11, 17, 19, 27, 33, 41, 43]
true_111 = [3, 7, 13, 19, 21, 31, 37, 39, 43, 49]
sigma_100 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [1, 0, 0]).keys())
sigma_110 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [1, 1, 0]).keys())
sigma_111 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [1, 1, 1]).keys())
sigma_222 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [2, 2, 2]).keys())
sigma_888 = list(GrainBoundaryGenerator.enum_sigma_cubic(50, [8, 8, 8]).keys())
self.assertListEqual(sorted(true_100), sorted(sigma_100))
self.assertListEqual(sorted(true_110), sorted(sigma_110))
self.assertListEqual(sorted(true_111), sorted(sigma_111))
self.assertListEqual(sorted(true_111), sorted(sigma_222))
self.assertListEqual(sorted(true_111), sorted(sigma_888))
def test_enum_sigma_hex(self):
true_100 = [17, 18, 22, 27, 38, 41]
true_001 = [7, 13, 19, 31, 37, 43, 49]
true_210 = [10, 11, 14, 25, 35, 49]
sigma_100 = list(GrainBoundaryGenerator.enum_sigma_hex(50, [1, 0, 0], [8, 3]).keys())
sigma_001 = list(GrainBoundaryGenerator.enum_sigma_hex(50, [0, 0, 1], [8, 3]).keys())
sigma_210 = list(GrainBoundaryGenerator.enum_sigma_hex(50, [2, 1, 0], [8, 3]).keys())
sigma_420 = list(GrainBoundaryGenerator.enum_sigma_hex(50, [4, 2, 0], [8, 3]).keys())
sigma_840 = list(GrainBoundaryGenerator.enum_sigma_hex(50, [8, 4, 0], [8, 3]).keys())
self.assertListEqual(sorted(true_100), sorted(sigma_100))
self.assertListEqual(sorted(true_001), sorted(sigma_001))
self.assertListEqual(sorted(true_210), sorted(sigma_210))
self.assertListEqual(sorted(true_210), sorted(sigma_420))
self.assertListEqual(sorted(true_210), sorted(sigma_840))
def test_enum_sigma_tet(self):
true_100 = [5, 37, 41, 13, 3, 15, 39, 25, 17, 29]
true_331 = [9, 3, 21, 39, 7, 31, 43, 13, 19, 37, 49]
sigma_100 = list(GrainBoundaryGenerator.enum_sigma_tet(50, [1, 0, 0], [9, 1]).keys())
sigma_331 = list(GrainBoundaryGenerator.enum_sigma_tet(50, [3, 3, 1], [9, 1]).keys())
self.assertListEqual(sorted(true_100), sorted(sigma_100))
self.assertListEqual(sorted(true_331), sorted(sigma_331))
def test_enum_sigma_ort(self):
true_100 = [41, 37, 39, 5, 15, 17, 13, 3, 25, 29]
sigma_100 = list(GrainBoundaryGenerator.enum_sigma_ort(50, [1, 0, 0], [270, 30, 29]).keys())
self.assertListEqual(sorted(true_100), sorted(sigma_100))
def test_enum_sigma_rho(self):
true_100 = [7, 11, 43, 13, 41, 19, 47, 31]
sigma_100 = list(GrainBoundaryGenerator.enum_sigma_rho(50, [1, 0, 0], [15, 4]).keys())
self.assertListEqual(sorted(true_100), sorted(sigma_100))
def test_enum_possible_plane_cubic(self):
all_plane = GrainBoundaryGenerator.enum_possible_plane_cubic(4, [1, 1, 1], 60)
self.assertEqual(len(all_plane["Twist"]), 1)
self.assertEqual(len(all_plane["Symmetric tilt"]), 6)
self.assertEqual(len(all_plane["Normal tilt"]), 12)
def test_get_trans_mat(self):
mat1, mat2 = GrainBoundaryGenerator.get_trans_mat(
[1, 1, 1],
95.55344419565849,
lat_type="o",
ratio=[10, 20, 21],
surface=[21, 20, 10],
normal=True,
)
self.assertAlmostEqual(np.dot(mat1[0], [21, 20, 10]), 0)
self.assertAlmostEqual(np.dot(mat1[1], [21, 20, 10]), 0)
self.assertAlmostEqual(np.linalg.det(mat1), np.linalg.det(mat2))
ab_len1 = np.linalg.norm(np.cross(mat1[2], [1, 1, 1]))
self.assertAlmostEqual(ab_len1, 0)
def test_get_rotation_angle_from_sigma(self):
true_angle = [12.680383491819821, 167.3196165081802]
angle = GrainBoundaryGenerator.get_rotation_angle_from_sigma(41, [1, 0, 0], lat_type="o", ratio=[270, 30, 29])
self.assertArrayAlmostEqual(true_angle, angle)
close_angle = [36.86989764584403, 143.13010235415598]
angle = GrainBoundaryGenerator.get_rotation_angle_from_sigma(6, [1, 0, 0], lat_type="o", ratio=[270, 30, 29])
self.assertArrayAlmostEqual(close_angle, angle)
if __name__ == "__main__":
unittest.main()
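# For orientation, a minimal standalone sketch of the API exercised above. The
# CIF path is hypothetical; the axis and angle follow the sigma-9 case used
# throughout these tests. Defined as a helper and never called, so importing
# this module stays side-effect free.
def _example_grain_boundary_sketch(cif_path="Cu_conventional.cif"):
    """Illustrative only: build one sigma-9 Cu grain boundary and report it."""
    bulk = Structure.from_file(cif_path)  # hypothetical local CIF file
    generator = GrainBoundaryGenerator(bulk)
    gb = generator.gb_from_parameters(
        [1, 2, 3], 123.74898859588858, expand_times=2, vacuum_thickness=1.5
    )
    return gb.sigma, gb.num_sites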
|
materialsproject/pymatgen
|
pymatgen/analysis/gb/tests/test_grain.py
|
Python
|
mit
| 16,955
|
[
"pymatgen"
] |
c64abea1d536f1c82ae65690fa7c2f3c165f9a390e81b5cf9d5155acd7faeccf
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.workflows_v1.services.workflows import WorkflowsAsyncClient
from google.cloud.workflows_v1.services.workflows import WorkflowsClient
from google.cloud.workflows_v1.services.workflows import pagers
from google.cloud.workflows_v1.services.workflows import transports
from google.cloud.workflows_v1.types import workflows
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert WorkflowsClient._get_default_mtls_endpoint(None) is None
assert WorkflowsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
WorkflowsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
WorkflowsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
WorkflowsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert WorkflowsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
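# In other words, the helper under test rewrites "<name>.googleapis.com" to
# "<name>.mtls.googleapis.com" (and likewise for sandbox endpoints) while
# leaving non-googleapis hosts untouched, e.g.:
#     "workflows.googleapis.com" -> "workflows.mtls.googleapis.com"
#     "api.example.com"          -> "api.example.com"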
@pytest.mark.parametrize("client_class", [WorkflowsClient, WorkflowsAsyncClient,])
def test_workflows_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "workflows.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.WorkflowsGrpcTransport, "grpc"),
(transports.WorkflowsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_workflows_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [WorkflowsClient, WorkflowsAsyncClient,])
def test_workflows_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "workflows.googleapis.com:443"
def test_workflows_client_get_transport_class():
transport = WorkflowsClient.get_transport_class()
available_transports = [
transports.WorkflowsGrpcTransport,
]
assert transport in available_transports
transport = WorkflowsClient.get_transport_class("grpc")
assert transport == transports.WorkflowsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc"),
(
WorkflowsAsyncClient,
transports.WorkflowsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
WorkflowsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowsClient)
)
@mock.patch.object(
WorkflowsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(WorkflowsAsyncClient),
)
def test_workflows_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(WorkflowsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(WorkflowsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", "true"),
(
WorkflowsAsyncClient,
transports.WorkflowsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", "false"),
(
WorkflowsAsyncClient,
transports.WorkflowsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
WorkflowsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowsClient)
)
@mock.patch.object(
WorkflowsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(WorkflowsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_workflows_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
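# Illustrative sketch (not part of the generated test surface): the endpoint
# selection exercised above reduces to a small pure function. The helper name
# below is hypothetical and exists only to summarize the expected behavior.
def _sketch_select_endpoint(use_client_cert_env, cert_available):
    # Mirrors test_workflows_client_mtls_env_auto: the mTLS endpoint is chosen
    # only when client certificates are both enabled and available.
    if use_client_cert_env == "true" and cert_available:
        return WorkflowsClient.DEFAULT_MTLS_ENDPOINT
    return WorkflowsClient.DEFAULT_ENDPOINT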
@pytest.mark.parametrize("client_class", [WorkflowsClient, WorkflowsAsyncClient])
@mock.patch.object(
WorkflowsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowsClient)
)
@mock.patch.object(
WorkflowsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(WorkflowsAsyncClient),
)
def test_workflows_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
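# Illustrative usage sketch: an application can resolve the endpoint and cert
# source once and feed them back through client options. Hypothetical helper;
# it assumes the ambient GOOGLE_API_USE_* environment variables drive the
# resolution exactly as exercised in the test above.
def _sketch_resolve_client_options():
    api_endpoint, cert_source = WorkflowsClient.get_mtls_endpoint_and_cert_source()
    return client_options.ClientOptions(
        api_endpoint=api_endpoint, client_cert_source=cert_source
    )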
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc"),
(
WorkflowsAsyncClient,
transports.WorkflowsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_workflows_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", grpc_helpers),
(
WorkflowsAsyncClient,
transports.WorkflowsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_workflows_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_workflows_client_client_options_from_dict():
with mock.patch(
"google.cloud.workflows_v1.services.workflows.transports.WorkflowsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = WorkflowsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", grpc_helpers),
(
WorkflowsAsyncClient,
transports.WorkflowsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_workflows_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
    # Test that the credentials from the file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"workflows.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="workflows.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
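# Illustrative sketch: pointing the client at a service account key file via
# client options, the configuration exercised above. The path is hypothetical;
# in a real environment the file must contain valid service account JSON.
def _sketch_client_from_credentials_file():
    options = client_options.ClientOptions(credentials_file="credentials.json")
    return WorkflowsClient(client_options=options)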
@pytest.mark.parametrize("request_type", [workflows.ListWorkflowsRequest, dict,])
def test_list_workflows(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.ListWorkflowsResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.ListWorkflowsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListWorkflowsPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_workflows_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
client.list_workflows()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.ListWorkflowsRequest()
@pytest.mark.asyncio
async def test_list_workflows_async(
transport: str = "grpc_asyncio", request_type=workflows.ListWorkflowsRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
workflows.ListWorkflowsResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.ListWorkflowsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListWorkflowsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_workflows_async_from_dict():
await test_list_workflows_async(request_type=dict)
def test_list_workflows_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.ListWorkflowsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
call.return_value = workflows.ListWorkflowsResponse()
client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_workflows_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.ListWorkflowsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
workflows.ListWorkflowsResponse()
)
await client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_workflows_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.ListWorkflowsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_workflows(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_workflows_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_workflows(
workflows.ListWorkflowsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_workflows_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            workflows.ListWorkflowsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_workflows(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_workflows_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_workflows(
workflows.ListWorkflowsRequest(), parent="parent_value",
)
def test_list_workflows_pager(transport_name: str = "grpc"):
client = WorkflowsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_workflows(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, workflows.Workflow) for i in results)
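# Illustrative sketch of how the pager above is consumed by application code:
# iterating the pager transparently fetches successive pages. Hypothetical
# helper; a real call needs valid credentials and an existing parent resource.
def _sketch_collect_workflow_names(client, parent):
    return [workflow.name for workflow in client.list_workflows(parent=parent)]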
def test_list_workflows_pages(transport_name: str = "grpc"):
client = WorkflowsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
pages = list(client.list_workflows(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_workflows_async_pager():
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_workflows), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
async_pager = await client.list_workflows(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, workflows.Workflow) for i in responses)
@pytest.mark.asyncio
async def test_list_workflows_async_pages():
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_workflows), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_workflows(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
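# Illustrative async counterpart: the async pager is awaited once and then
# iterated with `async for`. Hypothetical sketch assuming an initialized
# WorkflowsAsyncClient with working credentials.
async def _sketch_collect_workflow_names_async(client, parent):
    names = []
    pager = await client.list_workflows(parent=parent)
    async for workflow in pager:
        names.append(workflow.name)
    return names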
@pytest.mark.parametrize("request_type", [workflows.GetWorkflowRequest, dict,])
def test_get_workflow(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.Workflow(
name="name_value",
description="description_value",
state=workflows.Workflow.State.ACTIVE,
revision_id="revision_id_value",
service_account="service_account_value",
source_contents="source_contents_value",
)
response = client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.GetWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, workflows.Workflow)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == workflows.Workflow.State.ACTIVE
assert response.revision_id == "revision_id_value"
assert response.service_account == "service_account_value"
def test_get_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
client.get_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.GetWorkflowRequest()
@pytest.mark.asyncio
async def test_get_workflow_async(
transport: str = "grpc_asyncio", request_type=workflows.GetWorkflowRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
workflows.Workflow(
name="name_value",
description="description_value",
state=workflows.Workflow.State.ACTIVE,
revision_id="revision_id_value",
service_account="service_account_value",
)
)
response = await client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.GetWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, workflows.Workflow)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == workflows.Workflow.State.ACTIVE
assert response.revision_id == "revision_id_value"
assert response.service_account == "service_account_value"
@pytest.mark.asyncio
async def test_get_workflow_async_from_dict():
await test_get_workflow_async(request_type=dict)
def test_get_workflow_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.GetWorkflowRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
call.return_value = workflows.Workflow()
client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_workflow_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.GetWorkflowRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflows.Workflow())
await client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_workflow_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.Workflow()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_workflow(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_workflow_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_workflow(
workflows.GetWorkflowRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_workflow_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflows.Workflow())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_workflow(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_workflow_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_workflow(
workflows.GetWorkflowRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [workflows.CreateWorkflowRequest, dict,])
def test_create_workflow(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.CreateWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
client.create_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.CreateWorkflowRequest()
@pytest.mark.asyncio
async def test_create_workflow_async(
transport: str = "grpc_asyncio", request_type=workflows.CreateWorkflowRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.CreateWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_workflow_async_from_dict():
await test_create_workflow_async(request_type=dict)
def test_create_workflow_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.CreateWorkflowRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_workflow_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.CreateWorkflowRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_workflow_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_workflow(
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].workflow
mock_val = workflows.Workflow(name="name_value")
assert arg == mock_val
arg = args[0].workflow_id
mock_val = "workflow_id_value"
assert arg == mock_val
def test_create_workflow_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_workflow(
workflows.CreateWorkflowRequest(),
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
@pytest.mark.asyncio
async def test_create_workflow_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_workflow(
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].workflow
mock_val = workflows.Workflow(name="name_value")
assert arg == mock_val
arg = args[0].workflow_id
mock_val = "workflow_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_workflow_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_workflow(
workflows.CreateWorkflowRequest(),
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
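# Illustrative LRO sketch: create_workflow returns a long-running operation
# future (asserted as future.Future above); callers typically block on
# .result() to obtain the created Workflow. Helper and argument values are
# hypothetical.
def _sketch_create_workflow_and_wait(client, parent):
    operation = client.create_workflow(
        parent=parent,
        workflow=workflows.Workflow(name="name_value"),
        workflow_id="workflow_id_value",
    )
    return operation.result()  # Resolves to a workflows.Workflow on success.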
@pytest.mark.parametrize("request_type", [workflows.DeleteWorkflowRequest, dict,])
def test_delete_workflow(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.DeleteWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
client.delete_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.DeleteWorkflowRequest()
@pytest.mark.asyncio
async def test_delete_workflow_async(
transport: str = "grpc_asyncio", request_type=workflows.DeleteWorkflowRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.DeleteWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_workflow_async_from_dict():
await test_delete_workflow_async(request_type=dict)
def test_delete_workflow_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.DeleteWorkflowRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_workflow_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.DeleteWorkflowRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_workflow_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_workflow(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_workflow_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_workflow(
workflows.DeleteWorkflowRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_workflow_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_workflow(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_workflow_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_workflow(
workflows.DeleteWorkflowRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [workflows.UpdateWorkflowRequest, dict,])
def test_update_workflow(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.UpdateWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
client.update_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.UpdateWorkflowRequest()
@pytest.mark.asyncio
async def test_update_workflow_async(
transport: str = "grpc_asyncio", request_type=workflows.UpdateWorkflowRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.UpdateWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_workflow_async_from_dict():
await test_update_workflow_async(request_type=dict)
def test_update_workflow_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.UpdateWorkflowRequest()
request.workflow.name = "workflow.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "workflow.name=workflow.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_workflow_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.UpdateWorkflowRequest()
request.workflow.name = "workflow.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "workflow.name=workflow.name/value",) in kw[
"metadata"
]
def test_update_workflow_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_workflow(
workflow=workflows.Workflow(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].workflow
mock_val = workflows.Workflow(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_workflow_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_workflow(
workflows.UpdateWorkflowRequest(),
workflow=workflows.Workflow(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_workflow_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_workflow(
workflow=workflows.Workflow(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].workflow
mock_val = workflows.Workflow(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_workflow_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_workflow(
workflows.UpdateWorkflowRequest(),
workflow=workflows.Workflow(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.WorkflowsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.WorkflowsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = WorkflowsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.WorkflowsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = WorkflowsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = WorkflowsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.WorkflowsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = WorkflowsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.WorkflowsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = WorkflowsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.WorkflowsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.WorkflowsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.WorkflowsGrpcTransport,)
def test_workflows_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.WorkflowsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_workflows_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.workflows_v1.services.workflows.transports.WorkflowsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.WorkflowsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_workflows",
"get_workflow",
"create_workflow",
"delete_workflow",
"update_workflow",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_workflows_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.workflows_v1.services.workflows.transports.WorkflowsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.WorkflowsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_workflows_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.workflows_v1.services.workflows.transports.WorkflowsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.WorkflowsTransport()
adc.assert_called_once()
def test_workflows_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
WorkflowsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport,],
)
def test_workflows_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.WorkflowsGrpcTransport, grpc_helpers),
(transports.WorkflowsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_workflows_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"workflows.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="workflows.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport],
)
def test_workflows_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_workflows_host_no_port():
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="workflows.googleapis.com"
),
)
assert client.transport._host == "workflows.googleapis.com:443"
def test_workflows_host_with_port():
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="workflows.googleapis.com:8000"
),
)
assert client.transport._host == "workflows.googleapis.com:8000"
def test_workflows_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.WorkflowsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_workflows_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.WorkflowsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport],
)
def test_workflows_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport],
)
def test_workflows_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_workflows_grpc_lro_client():
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_workflows_grpc_lro_async_client():
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_workflow_path():
project = "squid"
location = "clam"
workflow = "whelk"
expected = "projects/{project}/locations/{location}/workflows/{workflow}".format(
project=project, location=location, workflow=workflow,
)
actual = WorkflowsClient.workflow_path(project, location, workflow)
assert expected == actual
def test_parse_workflow_path():
expected = {
"project": "octopus",
"location": "oyster",
"workflow": "nudibranch",
}
path = WorkflowsClient.workflow_path(**expected)
# Check that the path construction is reversible.
actual = WorkflowsClient.parse_workflow_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = WorkflowsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = WorkflowsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = WorkflowsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = WorkflowsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = WorkflowsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = WorkflowsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = WorkflowsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = WorkflowsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = WorkflowsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = WorkflowsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = WorkflowsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = WorkflowsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = WorkflowsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = WorkflowsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = WorkflowsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.WorkflowsTransport, "_prep_wrapped_messages"
) as prep:
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.WorkflowsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = WorkflowsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(WorkflowsClient, transports.WorkflowsGrpcTransport),
(WorkflowsAsyncClient, transports.WorkflowsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-workflows | tests/unit/gapic/workflows_v1/test_workflows.py | Python | apache-2.0 | 94,924 | ["Octopus"] | 6373d93214c179e0cf0da444b8082c3c744ff029a6d9e136793ce02ef85d0d69 |
# $HeadURL $
''' ResourceStatusHandler
Module that allows users to access the ResourceStatusDB remotely.
'''
from DIRAC import gLogger, S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ResourceStatusSystem.DB.ResourceStatusDB import ResourceStatusDB
__RCSID__ = '$Id: $'
db = None
def initializeResourceStatusHandler( _serviceInfo ):
'''
Handler initialization, where we set the ResourceStatusDB as global db, and
we instantiate the synchronizer.
'''
global db
db = ResourceStatusDB()
# Regenerates DB tables if needed
db._checkTable()
return S_OK()
################################################################################
class ResourceStatusHandler( RequestHandler ):
'''
  The ResourceStatusHandler exposes the DB front-end functions through an XML-RPC
  server, with functionality inherited from
  :class:`DIRAC.Core.DISET.RequestHandler.RequestHandler`
According to the ResourceStatusDB philosophy, only functions of the type:
- insert
- update
- select
- delete
  are exposed. If you need anything more complicated, either look for it in the
  :class:`ResourceStatusClient`, or code it yourself. This way the DB and the
  Service are kept clean and tidy.
  You can use this service directly, but you MUST NOT DO IT. Use it through the
  :class:`ResourceStatusClient`. In the worst case it offers performance as good
  as the :class:`ResourceStatusHandler`, if not better.
  >>> from DIRAC.Core.DISET.RPCClient import RPCClient
  >>> server = RPCClient( "ResourceStatus/ResourceStatus" )
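  For illustration only (the table and selection keys below are hypothetical,
  not actual schema names):
  >>> result = server.select( { 'Name' : 'SomeSite' }, { 'table' : 'SiteStatus' } )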
'''
def __init__( self, *args, **kwargs ):
super( ResourceStatusHandler, self ).__init__( *args, **kwargs )
@staticmethod
def __logResult( methodName, result ):
'''
    Method that writes error messages to the log
'''
if not result[ 'OK' ]:
gLogger.error( '%s%s' % ( methodName, result[ 'Message' ] ) )
@staticmethod
def setDatabase( database ):
'''
    This method lets us inherit from this class and overwrite the database object
    without problems with the global variables.
:Parameters:
**database** - `MySQL`
database used by this handler
:return: None
'''
global db
db = database
types_insert = [ dict, dict ]
def export_insert( self, params, meta ):
'''
    This method is a bridge to access :class:`ResourceStatusDB` remotely. It adds
    neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**args** - `tuple`
arguments for the mysql query ( must match table columns ! ).
**kwargs** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
gLogger.info( 'insert: %s %s' % ( params, meta ) )
res = db.insert( params, meta )
self.__logResult( 'insert', res )
return res
types_update = [ dict, dict ]
def export_update( self, params, meta ):
'''
    This method is a bridge to access :class:`ResourceStatusDB` remotely. It adds
    neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**args** - `tuple`
arguments for the mysql query ( must match table columns ! ).
**kwargs** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
gLogger.info( 'update: %s %s' % ( params, meta ) )
res = db.update( params, meta )
self.__logResult( 'update', res )
return res
types_select = [ dict, dict ]
def export_select( self, params, meta ):
'''
    This method is a bridge to access :class:`ResourceStatusDB` remotely. It adds
    neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**args** - `tuple`
arguments for the mysql query ( must match table columns ! ).
**kwargs** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
gLogger.info( 'select: %s %s' % ( params, meta ) )
res = db.select( params, meta )
self.__logResult( 'select', res )
return res
types_delete = [ dict, dict ]
def export_delete( self, params, meta ):
'''
    This method is a bridge to access :class:`ResourceStatusDB` remotely. It adds
    neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**args** - `tuple`
arguments for the mysql query ( must match table columns ! ).
**kwargs** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
gLogger.info( 'delete: %s %s' % ( params, meta ) )
res = db.delete( params, meta )
self.__logResult( 'delete', res )
return res
types_addOrModify = [ dict, dict ]
def export_addOrModify( self, params, meta ):
'''
    This method is a bridge to access :class:`ResourceStatusDB` remotely. It adds
    neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**args** - `tuple`
arguments for the mysql query ( must match table columns ! ).
**kwargs** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
gLogger.info( 'addOrModify: %s %s' % ( params, meta ) )
res = db.addOrModify( params, meta )
self.__logResult( 'addOrModify', res )
return res
types_modify = [ dict, dict ]
def export_modify( self, params, meta ):
'''
    This method is a bridge to access :class:`ResourceStatusDB` remotely. It adds
    neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**args** - `tuple`
arguments for the mysql query ( must match table columns ! ).
**kwargs** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
gLogger.info( 'modify: %s %s' % ( params, meta ) )
res = db.modify( params, meta )
self.__logResult( 'modify', res )
return res
types_addIfNotThere = [ dict, dict ]
def export_addIfNotThere( self, params, meta ):
'''
    This method is a bridge to access :class:`ResourceStatusDB` remotely. It adds
    neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**args** - `tuple`
arguments for the mysql query ( must match table columns ! ).
**kwargs** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
gLogger.info( 'addIfNotThere: %s %s' % ( params, meta ) )
res = db.addIfNotThere( params, meta )
self.__logResult( 'addIfNotThere', res )
return res
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| avedaee/DIRAC | ResourceStatusSystem/Service/ResourceStatusHandler.py | Python | gpl-3.0 | 7,929 | ["DIRAC"] | 7f2ba0b8346b25733ddb1732be14ae7126eed19d81d2274ee3e0a8102cc0273c |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
*****************************
units - convert to real units
*****************************
Espresso++ returns temperature, energy, pressure, box length, etc. in dimensionless units. Usually the user must take care of the real length, energy, mass and charge units. This Python class is a helper that simplifies the conversion based on the basic units. However, it should always be used carefully for complicated systems.
Currently it is implemented for SI units. Make sure that you are using
length in [nm]
energy in [kJ/mol]
mass in [amu]
q in [e]
and it will return you
pressure in [bar]
temperature in [K]
time in [ps]
density in [kg/m^3]
Example: see the usage sketch at the end of this module.
'''
import espressopp
import math
kB = 1.3806488 * pow(10,-23) # m^2 * kg * s^-2 * K^-1
Na = 6.0221413 * pow(10, 23) # mol^-1
amu = 1.6605389  # amu in kg is 1.6605389e-27; the 1e-27 cancels against nm^3 = 1e-27 m^3 in density_factor
class Real_Units:
def __init__(self, _length, _energy, _mass, _charge):
self.length_factor = _length
self.energy_factor = _energy
self.mass_factor = _mass
self.charge_factor = _charge
self.pressure_factor = self.energy_factor / pow(self.length_factor, 3)
self.temperature_factor = self.energy_factor / (kB * Na) * 1000
self.time_factor = self.length_factor * math.sqrt( self.mass_factor / self.energy_factor)
self.density_factor = self.mass_factor * amu / pow(self.length_factor, 3)
def length(self, dl_length):
return dl_length * self.length_factor
def energy(self, dl_energy):
return dl_energy * self.energy_factor
def mass(self, dl_mass):
return dl_mass * self.mass_factor
def charge(self, dl_charge):
return dl_charge * self.charge_factor
def pressure(self, dl_pressure):
return dl_pressure * self.pressure_factor
def temperature(self, dl_temperature):
return dl_temperature * self.temperature_factor
def time(self, dl_time):
return dl_time * self.time_factor
def density(self, dl_density):
return dl_density * self.density_factor
    # the other way around: convert real units back to dimensionless ones
    def dl_length(self, length):
        return length / self.length_factor
def dl_energy(self, energy):
return energy / self.energy_factor
def dl_mass(self, mass):
return mass / self.mass_factor
def dl_charge(self, charge):
return charge / self.charge_factor
def dl_pressure(self, pressure):
return pressure / self.pressure_factor
def dl_temperature(self, temperature):
return temperature / self.temperature_factor
def dl_time(self, time):
return time / self.time_factor
def dl_density(self, density):
return density / self.density_factor
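# Usage sketch (not part of the original module; the basic units below are
# hypothetical values chosen only for illustration):
if __name__ == '__main__':
    # basic units: length = 0.34 nm, energy = 2.5 kJ/mol, mass = 18 amu, charge = 1 e
    units = Real_Units(0.34, 2.5, 18.0, 1.0)
    print('T* = 1.0   -> %8.2f K' % units.temperature(1.0))
    print('t* = 100.0 -> %8.4f ps' % units.time(100.0))
    print('rho* = 0.8 -> %8.2f kg/m^3' % units.density(0.8))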
| fedepad/espressopp | src/tools/units.py | Python | gpl-3.0 | 3,518 | ["ESPResSo"] | 2b5b8027b123a0c2edfc895a7d99d9b2276e47ba4f1798c5ea15fd4595969514 |
# Copyright (c) 2010, 2014, 2015, 2020, 2021 Smithsonian Astrophysical Observatory
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Smithsonian Astrophysical Observatory nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Support multiple data sets when using Sherpa commands.
The ``sherpa.astro.datastack`` module supports manipulating a stack
of related datasets and simultaneously fitting a model to them. It
provides stack-enabled (i.e. vectorized) versions of the key Sherpa
commands used to load data, set source models, get and set parameters,
fit, and plot. It is intended for use when simultaneously fitting a
common source model to multiple data sets.
Acknowledgements
----------------
The Datastack code was developed by Thomas Aldcroft and originally
provided as an external package for Sherpa, available at
http://cxc.harvard.edu/contrib/datastack/. The code was added to
Sherpa in version 4.7.1.
Example: PHA data
-----------------
In the following example, ``src.lis`` is an ASCII text file
with the name of a PHA file to load on each line::
from sherpa.astro import datastack
from sherpa.astro import ui
datastack.load_pha("@src.lis")
At this point the PHA files are loaded into data sets ``1`` to ``n``,
where ``n`` is the number of lines in ``src.lis``. Any ancillary
files - such as background, ARF, and RMF - will be loaded in as if
the files were loaded separately.
The loaded data sets can be shown using::
datastack.show_stack()
The module uses the special identifier ``[]`` to indicate all
members of a stack, so::
datastack.set_source([], ui.xsphabs.gal * ui.xspowerlaw.pl)
will set each file to have the *same* model (in this case an
absorbed power law). Adding the suffix ``__ID`` to a component
name will create a separate component for each data set; so::
src = ui.const1d.c__ID * ui.xsphabs.gal * ui.xspowerlaw.pl
datastack.set_source([], src)
ui.freeze(pl.norm)
will have a common absorbing component (``gal``) and power law
model (``pl``), but each data set has a separate constant term
labelled ``c`` followed by the data set identifier (e.g.
``c1`` and ``c2``). Since the normalization of the power law
component has been frozen the constant term represents the
normalization of each component (i.e. the model shape is
assumed constant, but its amplitude is not). These expressions
can be viewed using the command::
ui.show_source()
The ``integrate`` flag of the constant model component should
be turned off (so that the component acts as a scalar term rather
than including the bin-width). The ``datastack`` module does not
provide a simple way to do this, so the setting of each component
has to be changed individually::
for did in datastack.get_stack_ids():
mdl = ui.get_model_component('c{}'.format(did))
mdl.integrate = False
The ``datastack`` module provides versions of the ``sherpa.astro.ui``
module which accept ``[]``, so::
datastack.subtract([])
will subtract the background from each data set. Some commands are
the same - so either of the following will filter the data::
ui.notice(0.5, 7)
datastack.notice(0.5, 7)
The data and model for each data set can be viewed with::
datastack.plot_fit([])
and the model fit to all the data sets as normal::
ui.fit()
Loading data
------------
Multiple inputs (referred to as a stack here) can be specified
either from an ASCII file - one file name per line - by placing
the ``@`` character before the file name, or as a comma-separated
list of names:
- ``load_data``
- ``load_ascii``
- ``load_pha``
- ``load_bkg``
The ``load_arrays`` function is slightly different, in that it
accepts a list of array arguments, one for each dataset.
Examples include::
datastack.load_data("@srcs.lis")
datastack.load_pha("obs1.pha,obs2.pha,obs3.pha")
datastack.load_arrays([[x1, y1], [x2, y2]])
Identifying a stack
-------------------
When reading in a stack of data, the individual data sets
are numbered sequentially. These identifiers can be used to
select individual data sets using functions from the
``sherpa.astro.ui`` module. The functions from the ``datastack``
module work with a datastack identifier, which can be:
- ``[]``
- an iterable sequence of data set identifiers
- a datastack instance reference
- a subset of a datastack instance
So::
datastack.plot_data([])
datastack.plot_data([1,3])
plots all the data sets, and then just the first and third entries.
The following repeats the example, using a ``DataStack`` object::
ds = datastack.DataStack()
ds.load_data('@src.lis')
datastack.plot_data(ds)
datastack.plot_data(ds[1,3])
Note that when accessing a subset of a DataStack object - e.g.
``ds[1,3]`` - the numbers match the data set identifiers (and so
start at ``1``, not ``0``).
Setting a model for a stack
---------------------------
The functions
- ``set_source``
- ``set_model``
- ``set_bkg_model``
- ``set_full_model``
- ``set_bkg_full_model``
can be used to set the source expression for a datastack. This
expression can include models with components that are shared between
data sets and models which have a component per data set. The
latter case is created by using the identifier ``__ID`` in the
name of the component. The following call will fit the sum of a
polynomial and gaussian model to the data, with the same parameters
used for each data set (the model components are called ``bgnd``
and ``src`` respectively)::
datastack.set_source([], ui.polynom1d.bgnd + ui.gauss1d.src)
whereas::
datastack.set_source([], ui.polynom1d.bgnd__ID + ui.gauss1d.src)
fits a single gaussian model (``src``) to all data sets, but allows the
polynomial to vary between datasets (with names ``bgnd1``, ``bgnd2``,
...).
Utility functions for data stacks
---------------------------------
The following functions are provided:
- ``get_stack_ids``, which returns the data set identifiers that are
included in the data stack,
- ``show_stack``, which prints the data sets available in the data stack,
together with some basic metadata,
- ``clear_models``, to remove all the model expressions from a data stack,
- and ``clear_stack``, to remove all the data sets that are part of the
data stack.
Information about data sets which match a particular query are provided
by the ``query_by_header_keyword``, ``query_by_obsid``, and ``query``
functions.
"""
import logging
import sys
import types
from sherpa.astro import ui
from sherpa.utils.logging import config_logger
from sherpa.utils import public
from sherpa.astro.datastack.ds import DataStack
from .utils import set_template_id
logger = config_logger(__name__)
__all__ = ['set_template_id', 'DataStack']
@public
def set_stack_verbosity(level):
"""Change the logging level.
Informational messages from the datastack module are displayed using
the Python logging system. This routine determines the severity
of the messages that are displayed.
Parameters
----------
level
The logging level to use (e.g. `logging.INFO` or
`logging.WARNING`).
See Also
--------
set_stack_verbose
Examples
--------
>>> set_stack_verbosity(logging.ERROR)
"""
logger.setLevel(level)
@public
def set_stack_verbose(verbose=True):
"""Should stack functions print informational messages?
Parameters
----------
    verbose : bool, optional
If ``True`` then display messages at the logging level
of ``INFO`` or above. If ``False`` only display
``WARNING`` messages or above.
"""
if verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
_always_wrapped = ('load_pha', 'load_arrays', 'load_ascii', 'load_data',
'load_bkg')
# Use this and subsequent loop to wrap every function in sherpa.astro.ui
# with a datastack version
def _sherpa_ui_wrap(func):
def wrap(*args, **kwargs):
wrapfunc = func
if args:
if isinstance(args[0], DataStack):
datastack, args = args[0], args[1:]
# If the first argument is a list and it's either empty or
# made of non-iterables, then it's a datastack definition.
            # If the list contains iterables, they must be arrays for load_arrays.
elif isinstance(args[0], list) and \
not (len(args[0]) > 0 and hasattr(args[0][0], '__iter__')):
datastack = DATASTACK[args[0]] if args[0] else DATASTACK
args = args[1:]
else:
if func.__name__ in _always_wrapped:
# some (all?) load_* functions must always be wrapped
# for file stack syntax check and for ensuring dataset
# id consistency.
datastack = DATASTACK
else:
# No stack specifier so use native sherpa func
return func(*args, **kwargs)
try:
wrapfunc = getattr(datastack, func.__name__)
except AttributeError:
raise AttributeError(
'{0} is not a stack-enabled function.'.format(func.__name__))
return wrapfunc(*args, **kwargs)
wrap.__name__ = func.__name__
if not hasattr(DataStack, func.__name__):
doc = func.__doc__
else:
doc = getattr(DataStack, func.__name__).__doc__
wrap.__doc__ = doc
return wrap
def _datastack_wrap(func):
def wrap(*args, **kwargs):
if not args:
args = ([],) + args
if isinstance(args[0], DataStack):
datastack, args = args[0], args[1:]
elif isinstance(args[0], list) and \
not (len(args[0]) > 0 and hasattr(args[0][0], '__iter__')):
datastack = DATASTACK[args[0]] if args[0] else DATASTACK
args = args[1:]
else:
datastack = DATASTACK
return getattr(datastack, func.__name__)(*args, **kwargs)
wrap.__name__ = func.__name__
if not hasattr(DataStack, func.__name__):
doc = func.__doc__
else:
doc = getattr(DataStack, func.__name__).__doc__
wrap.__doc__ = doc
return wrap
# The default datastack
DATASTACK = DataStack()
# Wrap all sherpa UI funcs and a few DataStack methods for the
# command-line interface.
_module = sys.modules[__name__]
for attr in dir(ui):
func = getattr(ui, attr)
if isinstance(func, types.FunctionType):
setattr(_module, attr, public(_sherpa_ui_wrap(func)))
for funcname in ['clear_stack', 'show_stack', 'get_stack_ids',
'query', 'query_by_header_keyword', 'query_by_obsid']:
setattr(_module, funcname, public(
_datastack_wrap(getattr(DataStack, funcname))))
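# Illustration of the dispatch rules above (hypothetical session, not part of
# the original module). After wrapping, calls such as:
#   load_pha("@src.lis")   # always wrapped -> routed to DATASTACK.load_pha
#   plot_data([])          # empty stack spec -> the whole default stack
#   plot_data([1, 3])      # a subset of the default stack
#   plot_data(1)           # plain Sherpa id -> falls through to ui.plot_data
# are routed either to the default DataStack or to the native sherpa.astro.ui
# function, following _sherpa_ui_wrap and _datastack_wrap.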
@public
def clean():
"""Remove the models and data from the data stack and Sherpa.
This function clears out the models and data set up in the data
stack and in the Sherpa session.
See Also
--------
clear_models, clear_stack
sherpa.astro.ui.clean
"""
DATASTACK.clear_models()
DATASTACK.clear_stack()
ui.clean()
logger.warning("clean() will invalidate any existing DataStack " +
"instances by removing all the datasets from the " +
"Sherpa session")
| anetasie/sherpa | sherpa/astro/datastack/__init__.py | Python | gpl-3.0 | 12,731 | ["Gaussian"] | 94a5c2db8b1266928257b368b66f695dc76e36b44e00bfe574c99af838e2f09c |
# benchmark.py ---
#
# Filename: benchmark.py
# Description: Script for performance benchmarking
# Author:
# Maintainer:
# Created: Thu Jan 23 16:06:25 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""This script tries to run the test case multiple times with
different number of nodes and dump the performance. It shuffles the
runs with increasing number of processes in an attempt to avoid
systematic error based on process sequence.
"""
import getopt
import subprocess
import sys
import os
import socket
import multiprocessing
from datetime import datetime, timedelta
from collections import defaultdict
import random
def create_hostfile(slotcount=None, filename='hostfile'):
"""Create a file with name `filename` and write host info for openmpi
in this."""
if slotcount is None:
slotcount = multiprocessing.cpu_count()
with open(filename, 'w') as hostfile:
hostfile.write('%s slots=%d\n' % (socket.gethostname(), slotcount))
def run(script, scriptargs=[], hostfile='hostfile', np=2, ni=1, oversubscribe=False):
"""Run `script` with arguments in `scriptargs` list. Use `hostfile`
for host and slot info, use `np` moose processes in addition to
Python, do `ni` iterations to get average performance."""
    if not oversubscribe:
        # cap the requested process count at the available cores rather than
        # silently replacing the -n value
        np = min(np, multiprocessing.cpu_count())
s0 = datetime.now()
avg = defaultdict(timedelta)
procs = range(np)
for jj in range(ni):
random.shuffle(procs)
for ii in procs:
print 'Running with', ii, 'processes'
if ii > 0:
args = ['mpirun', '--hostfile', hostfile, '-np', '1', 'python', script] + scriptargs + [':', '-np', str(ii), '../../../moose']
else:
args = ['mpirun', '--hostfile', hostfile, '-np', '1', 'python', script] + scriptargs
print 'Running:', args
s1 = datetime.now()
ret = subprocess.check_output(args)
print '====================== OUTPUT START ======================'
print ret
print '====================== OUTPUT END ======================'
e1 = datetime.now()
d1 = e1 - s1
avg[ii] += d1
print 'Time to run ', args
print ' =', d1.days * 86400 + d1.seconds + 1e-6 * d1.microseconds
e0 = datetime.now()
d0 = e0 - s0
avg['all'] += d0
for ii in sorted(procs):
print 'Time to run using', ii, 'additional moose processes: '
print ' =', (avg[ii].days * 86400 + avg[ii].seconds + 1e-6 * avg[ii].microseconds) / ni
print 'Total time for all different process counts averaged over all', ni, 'iterations:', (avg['all'].days * 86400 + avg['all'].seconds + 1e-6 * avg['all'].microseconds) / ni
def print_usage(argv0):
print '''Usage: %s [-s slotcount] [-f hostfile] [-n maxprocess] [-i iterations] {script} [script arguments]
Run {script} using up to {slotcount} slots and display
execution time. If specified, the host information will be
written in `hostfile`. Default is "hostfile".''' % (argv0)
sys.exit(1)
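# Example invocation (hypothetical script name, for illustration only):
#   python benchmark.py -s 8 -n 4 -i 3 mybenchmark.py --arg1 value1
# writes an openmpi hostfile with 8 slots, then averages 3 shuffled iterations
# over 0..3 additional moose processes (capped at the machine's core count).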
if __name__ == '__main__':
if len(sys.argv) < 2:
print_usage(sys.argv[0])
hostfile = 'hostfile'
scriptargs = []
slots = None
np = 1
ni = 1
options, rest = getopt.getopt(sys.argv[1:], 'hf:s:n:i:', ['hostfile=','slots=', 'help', 'numproc='])
for opt, arg in options:
if opt in ('-h', '--help'):
print_usage(sys.argv[0])
elif opt in ('-f', '--hostfile'):
hostfile = arg
elif opt in ('-s', '--slots'):
slots = int(arg)
elif opt in ('-n', '--numproc'):
np = int(arg)
elif opt in ('-i', '--iterations'):
ni = int(arg)
if len(rest) < 1:
print_usage(sys.argv[0])
else:
script = rest[0]
if len(rest) > 1:
scriptargs = rest[1:]
create_hostfile(slotcount=slots, filename=hostfile)
run(script, scriptargs, hostfile=hostfile, np=np, ni=ni)
#
# benchmark.py ends here
| dilawar/moose-full | moose-core/tests/python/mpi/benchmark.py | Python | gpl-2.0 | 4,953 | ["MOOSE"] | ab8ed6bdd28eedc1b1ffa611d34bc2a68f348e5c9fd30f3c7ea14fcc306e19b8 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Description Tool for finding duplicates in an SDFile
##
## Authors: Inés Martínez (mmartinez4@imim.es)
## Manuel Pastor (manuel.pastor@upf.edu)
##
## Copyright 2015 Manuel Pastor
##
## This file is part of PhiTools
##
## PhiTools is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation version 3.
##
## PhiTools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with PhiTools. If not, see <http://www.gnu.org/licenses/>
from rdkit import Chem
from phitools import moleculeHelper as mh
import sys, argparse
sep = '\t'
def findDuplicates (args):
idlist = []
nmlist = []
smlist = []
counter = 0
if args.type == 'smi':
sys.stdout.write('reading file with smiles...\n')
if args.header:
args.fn.readline()
for line in args.fn:
fields = line.decode('utf-8').rstrip().split(sep)
if len(fields) > args.col:
smiles = fields[args.col]
else:
continue
mol = Chem.MolFromSmiles(smiles)
counter+=1
if mol is None: continue
try:
inchi = Chem.MolToInchi(mol)
inkey = Chem.InchiToInchiKey(inchi)
except:
continue
name = fields[args.id]
idlist.append(inkey[:-3])
nmlist.append(name)
smlist.append(smiles)
else:
# SD file
sys.stdout.write('reading SDFile...\n')
suppl = Chem.ForwardSDMolSupplier(args.fn,removeHs=False, sanitize=False)
for mol in suppl:
counter+=1
if mol is None: continue
try:
inchi = Chem.MolToInchi(mol)
inkey = Chem.InchiToInchiKey(inchi)
smiles = Chem.MolToSmiles(mol)
except:
continue
name = mh.getName(mol, count= counter, field= args.id)
idlist.append(inkey[:-3])
nmlist.append(name)
smlist.append(smiles)
args.fn.close()
n = len(idlist)
    sys.stdout.write('analyzing duplicates...\n')
args.out.write('{}\n'.format('\t'.join(['i', 'j', 'namei', 'namej', 'smilesi', 'smilesj']))) # Header
duplicates = 0
for i in range (n):
for j in range (i+1,n):
if idlist[i]==idlist[j]:
args.out.write('{}\n'.format('\t'.join([str(i), str(j), nmlist[i], nmlist[j], smlist[i], smlist[j]])))
duplicates+=1
args.out.close()
sys.stdout.write('\n%d duplicate molecules found\n' %duplicates)
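# Note (sketch, not part of the original tool): the pairwise scan above is
# O(n^2). An equivalent grouping by the truncated InChIKey yields the same
# pairs in roughly linear time:
#
#   from collections import defaultdict
#   groups = defaultdict(list)
#   for idx, key in enumerate(idlist):
#       groups[key].append(idx)
#   pairs = [(i, j) for idxs in groups.values()
#            for a, i in enumerate(idxs) for j in idxs[a + 1:]]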
def main ():
parser = argparse.ArgumentParser(description='Find duplicated molecules. In the output file, the first columns present the properties of the first molecule duplicated, the last columns contain data about the second molecule identified.')
parser.add_argument('-f', '--fn', type=argparse.FileType('rb'), help='Input file', required=True)
parser.add_argument('-i', '--id', type=str, help='If the input is an SD file, specify the field (if any) with the molecule ID. If the input has smiles, specify the column with the molecule ID.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-s', '--smi', action='store_const', dest='type', const='smi', default='smi', help='The input format is a file with smiles strings (default).')
group.add_argument('-m', '--sdf', action='store_const', dest='type', const='sdf', help='The input format is an SD file.')
parser.add_argument('-c', '--col', type=int, default=1, help='If the input file has smiles, indicate which column contains the smiles strings.')
parser.add_argument('-n', '--noheader', action='store_false', dest='header', help='Input data file doesn\'t have a header line.')
parser.add_argument('-o', '--out', type=argparse.FileType('w'), default='duplicates.txt', help='Output file name (default: duplicates.txt)')
args = parser.parse_args()
if args.type == 'smi':
try:
args.id = int(args.id)-1
args.col = args.col-1
except:
            sys.stderr.write('If the input file has smiles strings, the id field (-i) must contain the index of the column with the compound identifier.\n')
sys.exit()
findDuplicates(args)
if __name__ == '__main__':
main()
| manuelpastor/phitools | phitools/findDuplicates.py | Python | gpl-3.0 | 4,810 | ["RDKit"] | 1db3d7505532415404ebc3aa1650f9a2fa020e912d52e62ba920ae0f4b9f0e26 |
# Copyright (C) 2013 by Yanbo Ye (yeyanbo289@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes and methods for tree construction"""
__docformat__ = "restructuredtext en"
import sys
# Add path to Bio
sys.path.append('../..')
import itertools
import copy
from Bio.Phylo import BaseTree
from Bio.Align import MultipleSeqAlignment
from Bio.SubsMat import MatrixInfo
from Bio import _py3k
def _is_numeric(x):
return _py3k._is_int_or_long(x) or isinstance(x, (float, complex))
class _Matrix(object):
"""Base class for distance matrix or scoring matrix
Accepts a list of names and a lower triangular matrix.::
matrix = [[0],
[1, 0],
[2, 3, 0],
[4, 5, 6, 0]]
represents the symmetric matrix of
[0,1,2,4]
[1,0,3,5]
[2,3,0,6]
[4,5,6,0]
:Parameters:
names : list
names of elements, used for indexing
matrix : list
nested list of numerical lists in lower triangular format
Example
-------
>>> from Bio.Phylo.TreeConstruction import _Matrix
>>> names = ['Alpha', 'Beta', 'Gamma', 'Delta']
>>> matrix = [[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]]
>>> m = _Matrix(names, matrix)
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]])
You can use two indices to get or assign an element in the matrix.
>>> m[1,2]
3
>>> m['Beta','Gamma']
3
>>> m['Beta','Gamma'] = 4
>>> m['Beta','Gamma']
4
    Furthermore, you can use one index to get or assign a list of elements related to that index.
>>> m[0]
[0, 1, 2, 4]
>>> m['Alpha']
[0, 1, 2, 4]
>>> m['Alpha'] = [0, 7, 8, 9]
>>> m[0]
[0, 7, 8, 9]
>>> m[0,1]
7
    You can also delete or insert a column & row of elements by index.
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
>>> del m['Alpha']
>>> m
_Matrix(names=['Beta', 'Gamma', 'Delta'], matrix=[[0], [4, 0], [5, 6, 0]])
>>> m.insert('Alpha', [0, 7, 8, 9] , 0)
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
"""
def __init__(self, names, matrix=None):
"""Initialize matrix by a list of names and a list of
lower triangular matrix data"""
# check names
if isinstance(names, list) and all(isinstance(s, str) for s in names):
if len(set(names)) == len(names):
self.names = names
else:
raise ValueError("Duplicate names found")
else:
raise TypeError("'names' should be a list of strings")
# check matrix
if matrix is None:
# create a new one with 0 if matrix is not assigned
matrix = [[0] * i for i in range(1, len(self) + 1)]
self.matrix = matrix
else:
# check if all elements are numbers
if (isinstance(matrix, list)
and all(isinstance(l, list) for l in matrix)
and all(_is_numeric(n) for n in [item for sublist in matrix
for item in sublist])):
# check if the same length with names
if len(matrix) == len(names):
# check if is lower triangle format
if [len(m) for m in matrix] == list(range(1, len(self) + 1)):
self.matrix = matrix
else:
raise ValueError(
"'matrix' should be in lower triangle format")
else:
raise ValueError(
"'names' and 'matrix' should be the same size")
else:
raise TypeError("'matrix' should be a list of numerical lists")
def __getitem__(self, item):
"""Access value(s) by the index(s) or name(s).
For a _Matrix object 'dm'::
dm[i] get a value list from the given 'i' to others;
dm[i, j] get the value between 'i' and 'j';
dm['name'] map name to index first
dm['name1', 'name2'] map name to index first
"""
# Handle single indexing
if isinstance(item, (int, str)):
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
if item in self.names:
index = self.names.index(item)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if index > len(self) - 1:
raise IndexError("Index out of range.")
return [self.matrix[index][i] for i in range(0, index)] + [self.matrix[i][index] for i in range(index, len(self))]
# Handle double indexing
elif len(item) == 2:
row_index = None
col_index = None
if all(isinstance(i, int) for i in item):
row_index, col_index = item
elif all(isinstance(i, str) for i in item):
row_name, col_name = item
if row_name in self.names and col_name in self.names:
row_index = self.names.index(row_name)
col_index = self.names.index(col_name)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if row_index > len(self) - 1 or col_index > len(self) - 1:
raise IndexError("Index out of range.")
if row_index > col_index:
return self.matrix[row_index][col_index]
else:
return self.matrix[col_index][row_index]
else:
raise TypeError("Invalid index type.")
def __setitem__(self, item, value):
"""Set value by the index(s) or name(s).
Similar to __getitem__::
dm[1] = [1, 0, 3, 4] set values from '1' to others;
dm[i, j] = 2 set the value from 'i' to 'j'
"""
# Handle single indexing
if isinstance(item, (int, str)):
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
if item in self.names:
index = self.names.index(item)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if index > len(self) - 1:
raise IndexError("Index out of range.")
# check and assign value
if isinstance(value, list) and all(_is_numeric(n) for n in value):
if len(value) == len(self):
for i in range(0, index):
self.matrix[index][i] = value[i]
for i in range(index, len(self)):
self.matrix[i][index] = value[i]
else:
raise ValueError("Value not the same size.")
else:
raise TypeError("Invalid value type.")
# Handle double indexing
elif len(item) == 2:
row_index = None
col_index = None
if all(isinstance(i, int) for i in item):
row_index, col_index = item
elif all(isinstance(i, str) for i in item):
row_name, col_name = item
if row_name in self.names and col_name in self.names:
row_index = self.names.index(row_name)
col_index = self.names.index(col_name)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if row_index > len(self) - 1 or col_index > len(self) - 1:
raise IndexError("Index out of range.")
# check and assign value
if _is_numeric(value):
if row_index > col_index:
self.matrix[row_index][col_index] = value
else:
self.matrix[col_index][row_index] = value
else:
raise TypeError("Invalid value type.")
else:
raise TypeError("Invalid index type.")
def __delitem__(self, item):
"""Delete related distances by the index or name"""
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
index = self.names.index(item)
else:
raise TypeError("Invalid index type.")
# remove distances related to index
for i in range(index + 1, len(self)):
del self.matrix[i][index]
del self.matrix[index]
# remove name
del self.names[index]
def insert(self, name, value, index=None):
"""Insert distances given the name and value.
:Parameters:
name : str
name of a row/col to be inserted
value : list
a row/col of values to be inserted
"""
if isinstance(name, str):
# insert at the given index or at the end
if index is None:
index = len(self)
if not isinstance(index, int):
raise TypeError("Invalid index type.")
# insert name
self.names.insert(index, name)
# insert elements of 0, to be assigned
self.matrix.insert(index, [0] * index)
for i in range(index, len(self)):
self.matrix[i].insert(index, 0)
# assign value
self[index] = value
else:
raise TypeError("Invalid name type.")
def __len__(self):
"""Matrix length"""
return len(self.names)
def __repr__(self):
return self.__class__.__name__ \
+ "(names=%s, matrix=%s)" \
% tuple(map(repr, (self.names, self.matrix)))
def __str__(self):
"""Get a lower triangular matrix string"""
matrix_string = '\n'.join(
[self.names[i] + "\t" + "\t".join([str(n) for n in self.matrix[i]])
for i in range(0, len(self))])
matrix_string = matrix_string + "\n\t" + "\t".join(self.names)
return matrix_string
class _DistanceMatrix(_Matrix):
"""Distance matrix class that can be used for distance based tree algorithms.
All diagonal elements will be zero no matter what the users provide.
"""
def __init__(self, names, matrix=None):
_Matrix.__init__(self, names, matrix)
self._set_zero_diagonal()
def __setitem__(self, item, value):
_Matrix.__setitem__(self, item, value)
self._set_zero_diagonal()
def _set_zero_diagonal(self):
"""set all diagonal elements to zero"""
for i in range(0, len(self)):
self.matrix[i][i] = 0
class DistanceCalculator(object):
"""Class to calculate the distance matrix from a DNA or Protein
    Multiple Sequence Alignment (MSA) and the given name of the
substitution model.
Currently only scoring matrices are used.
:Parameters:
model : str
Name of the model matrix to be used to calculate distance.
The attribute `dna_matrices` contains the available model
names for DNA sequences and `protein_matrices` for protein
sequences.
Example
-------
>>> from Bio.Phylo.TreeConstruction import DistanceCalculator
>>> from Bio import AlignIO
>>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
>>> print aln
SingleLetterAlphabet() alignment with 5 rows and 13 columns
AACGTGGCCACAT Alpha
AAGGTCGCCACAC Beta
GAGATTTCCGCCT Delta
GAGATCTCCGCCC Epsilon
CAGTTCGCCACAA Gamma
DNA calculator with 'identity' model::
>>> calculator = DistanceCalculator('identity')
>>> dm = calculator.get_distance(aln)
>>> print dm
Alpha 0
Beta 0.230769230769 0
Gamma 0.384615384615 0.230769230769 0
Delta 0.538461538462 0.538461538462 0.538461538462 0
Epsilon 0.615384615385 0.384615384615 0.461538461538 0.153846153846 0
Alpha Beta Gamma Delta Epsilon
Protein calculator with 'blosum62' model::
>>> calculator = DistanceCalculator('blosum62')
>>> dm = calculator.get_distance(aln)
>>> print dm
Alpha 0
Beta 0.369047619048 0
Gamma 0.493975903614 0.25 0
Delta 0.585365853659 0.547619047619 0.566265060241 0
Epsilon 0.7 0.355555555556 0.488888888889 0.222222222222 0
Alpha Beta Gamma Delta Epsilon
"""
dna_alphabet = ['A', 'T', 'C', 'G']
# BLAST nucleic acid scoring matrix
blastn = [[5],
[-4, 5],
[-4, -4, 5],
[-4, -4, -4, 5]]
# transition/transversion scoring matrix
trans = [[6],
[-5, 6],
[-5, -1, 6],
[-1, -5, -5, 6]]
protein_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y',
'Z']
# matrices available
dna_matrices = {'blastn': blastn, 'trans': trans}
protein_models = MatrixInfo.available_matrices
protein_matrices = dict((name, getattr(MatrixInfo, name))
for name in protein_models)
dna_models = list(dna_matrices.keys())
models = ['identity'] + dna_models + protein_models
def __init__(self, model='identity'):
"""Initialize with a distance model"""
if model == 'identity':
self.scoring_matrix = None
elif model in self.dna_models:
self.scoring_matrix = _Matrix(self.dna_alphabet,
self.dna_matrices[model])
elif model in self.protein_models:
self.scoring_matrix = self._build_protein_matrix(
self.protein_matrices[model])
else:
raise ValueError("Model not supported. Available models: "
+ ", ".join(self.models))
def _pairwise(self, seq1, seq2):
"""Calculate pairwise distance from two sequences"""
score = 0
max_score = 0
if self.scoring_matrix:
max_score1 = 0
max_score2 = 0
skip_letters = ['-', '*']
for i in range(0, len(seq1)):
l1 = seq1[i]
l2 = seq2[i]
if l1 in skip_letters or l2 in skip_letters:
continue
if l1 not in self.scoring_matrix.names:
raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
% (l1, seq1.id, i))
if l2 not in self.scoring_matrix.names:
raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
% (l2, seq2.id, i))
max_score1 += self.scoring_matrix[l1, l1]
max_score2 += self.scoring_matrix[l2, l2]
score += self.scoring_matrix[l1, l2]
            max_score = max(max_score1, max_score2)
else:
for i in range(0, len(seq1)):
l1 = seq1[i]
l2 = seq2[i]
if l1 == l2:
score += 1
max_score = len(seq1)
return 1 - (score * 1.0 / max_score)
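    # e.g. with the default 'identity' model, _pairwise over 'AACG' vs 'AATG'
    # (illustrative sequences) scores 3 matching positions out of 4 and
    # returns 1 - 3/4 = 0.25.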
def get_distance(self, msa):
"""Return a _DistanceMatrix for MSA object
:Parameters:
msa : MultipleSeqAlignment
DNA or Protein multiple sequence alignment.
"""
if not isinstance(msa, MultipleSeqAlignment):
raise TypeError("Must provide a MultipleSeqAlignment object.")
names = [s.id for s in msa]
dm = _DistanceMatrix(names)
for seq1, seq2 in itertools.combinations(msa, 2):
dm[seq1.id, seq2.id] = self._pairwise(seq1, seq2)
return dm
def _build_protein_matrix(self, subsmat):
"""Convert matrix from SubsMat format to _Matrix object"""
protein_matrix = _Matrix(self.protein_alphabet)
for k, v in subsmat.items():
aa1, aa2 = k
protein_matrix[aa1, aa2] = v
return protein_matrix
class TreeConstructor(object):
"""Base class for all tree constructor."""
def build_tree(self, msa):
"""Caller to built the tree from a MultipleSeqAlignment object.
This should be implemented in subclass"""
raise NotImplementedError("Method not implemented!")
class DistanceTreeConstructor(TreeConstructor):
"""Distance based tree constructor.
:Parameters:
method : str
Distance tree construction method, 'nj'(default) or 'upgma'.
distance_calculator : DistanceCalculator
The distance matrix calculator for multiple sequence alignment.
It must be provided if `build_tree` will be called.
Example
--------
>>> from TreeConstruction import DistanceTreeConstructor
>>> constructor = DistanceTreeConstructor()
UPGMA Tree:
>>> upgmatree = constructor.upgma(dm)
>>> print upgmatree
Tree(rooted=True)
Clade(name='Inner4')
Clade(branch_length=0.171955155115, name='Inner1')
Clade(branch_length=0.111111111111, name='Epsilon')
Clade(branch_length=0.111111111111, name='Delta')
Clade(branch_length=0.0673103855608, name='Inner3')
Clade(branch_length=0.0907558806655, name='Inner2')
Clade(branch_length=0.125, name='Gamma')
Clade(branch_length=0.125, name='Beta')
Clade(branch_length=0.215755880666, name='Alpha')
NJ Tree:
>>> njtree = constructor.nj(dm)
>>> print njtree
Tree(rooted=False)
Clade(name='Inner3')
Clade(branch_length=0.0142054862889, name='Inner2')
Clade(branch_length=0.239265540676, name='Inner1')
Clade(branch_length=0.0853101915988, name='Epsilon')
Clade(branch_length=0.136912030623, name='Delta')
Clade(branch_length=0.292306275042, name='Alpha')
Clade(branch_length=0.0747705106139, name='Beta')
Clade(branch_length=0.175229489386, name='Gamma')
"""
methods = ['nj', 'upgma']
def __init__(self, distance_calculator=None, method="nj"):
if (distance_calculator is None
or isinstance(distance_calculator, DistanceCalculator)):
self.distance_calculator = distance_calculator
else:
raise TypeError("Must provide a DistanceCalculator object.")
if isinstance(method, str) and method in self.methods:
self.method = method
else:
raise TypeError("Bad method: " + method +
". Available methods: " + ", ".join(self.methods))
def build_tree(self, msa):
if self.distance_calculator:
dm = self.distance_calculator.get_distance(msa)
tree = None
if self.method == 'upgma':
tree = self.upgma(dm)
else:
tree = self.nj(dm)
return tree
else:
raise TypeError("Must provide a DistanceCalculator object.")
def upgma(self, distance_matrix):
"""Construct and return an UPGMA tree.
Constructs and returns an Unweighted Pair Group Method
with Arithmetic mean (UPGMA) tree.
:Parameters:
distance_matrix : _DistanceMatrix
The distance matrix for tree construction.
"""
if not isinstance(distance_matrix, _DistanceMatrix):
raise TypeError("Must provide a _DistanceMatrix object.")
# make a copy of the distance matrix to be used
dm = copy.deepcopy(distance_matrix)
# init terminal clades
clades = [BaseTree.Clade(None, name) for name in dm.names]
# init minimum index
min_i = 0
min_j = 0
inner_count = 0
while len(dm) > 1:
min_dist = dm[1, 0]
# find minimum index
for i in range(1, len(dm)):
for j in range(0, i):
if min_dist >= dm[i, j]:
min_dist = dm[i, j]
min_i = i
min_j = j
# create clade
clade1 = clades[min_i]
clade2 = clades[min_j]
inner_count += 1
inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
inner_clade.clades.append(clade1)
inner_clade.clades.append(clade2)
# assign branch length
if clade1.is_terminal():
clade1.branch_length = min_dist * 1.0 / 2
else:
clade1.branch_length = min_dist * \
1.0 / 2 - self._height_of(clade1)
if clade2.is_terminal():
clade2.branch_length = min_dist * 1.0 / 2
else:
clade2.branch_length = min_dist * \
1.0 / 2 - self._height_of(clade2)
# update node list
clades[min_j] = inner_clade
del clades[min_i]
# rebuild distance matrix,
# set the distances of new node at the index of min_j
for k in range(0, len(dm)):
if k != min_i and k != min_j:
dm[min_j, k] = (dm[min_i, k] + dm[min_j, k]) * 1.0 / 2
dm.names[min_j] = "Inner" + str(inner_count)
del dm[min_i]
inner_clade.branch_length = 0
return BaseTree.Tree(inner_clade)
def nj(self, distance_matrix):
"""Construct and return an Neighbor Joining tree.
:Parameters:
distance_matrix : _DistanceMatrix
The distance matrix for tree construction.
"""
if not isinstance(distance_matrix, _DistanceMatrix):
raise TypeError("Must provide a _DistanceMatrix object.")
# make a copy of the distance matrix to be used
dm = copy.deepcopy(distance_matrix)
# init terminal clades
clades = [BaseTree.Clade(None, name) for name in dm.names]
# init node distance
node_dist = [0] * len(dm)
# init minimum index
min_i = 0
min_j = 0
inner_count = 0
while len(dm) > 2:
# calculate nodeDist
for i in range(0, len(dm)):
node_dist[i] = 0
for j in range(0, len(dm)):
node_dist[i] += dm[i, j]
node_dist[i] = node_dist[i] / (len(dm) - 2)
# find minimum distance pair
min_dist = dm[1, 0] - node_dist[1] - node_dist[0]
min_i = 0
min_j = 1
for i in range(1, len(dm)):
for j in range(0, i):
temp = dm[i, j] - node_dist[i] - node_dist[j]
if min_dist > temp:
min_dist = temp
min_i = i
min_j = j
# create clade
clade1 = clades[min_i]
clade2 = clades[min_j]
inner_count += 1
inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
inner_clade.clades.append(clade1)
inner_clade.clades.append(clade2)
# assign branch length
clade1.branch_length = (dm[min_i, min_j] + node_dist[min_i]
- node_dist[min_j]) / 2.0
clade2.branch_length = dm[min_i, min_j] - clade1.branch_length
# update node list
clades[min_j] = inner_clade
del clades[min_i]
# rebuild distance matrix,
# set the distances of new node at the index of min_j
for k in range(0, len(dm)):
if k != min_i and k != min_j:
dm[min_j, k] = (dm[min_i, k] + dm[min_j, k]
- dm[min_i, min_j]) / 2.0
dm.names[min_j] = "Inner" + str(inner_count)
del dm[min_i]
# set the last clade as one of the child of the inner_clade
root = None
if clades[0] == inner_clade:
clades[0].branch_length = 0
clades[1].branch_length = dm[1, 0]
clades[0].clades.append(clades[1])
root = clades[0]
else:
clades[0].branch_length = dm[1, 0]
clades[1].branch_length = 0
clades[1].clades.append(clades[0])
root = clades[1]
return BaseTree.Tree(root, rooted=False)
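    # Note on the update scheme above (standard Saitou-Nei neighbor joining):
    # node divergence is r_i = sum_j d(i, j) / (N - 2), the joined pair
    # minimizes Q(i, j) = d(i, j) - r_i - r_j, and branch lengths follow
    # b_i = (d(i, j) + r_i - r_j) / 2 with b_j = d(i, j) - b_i.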
def _height_of(self, clade):
"""calculate clade height -- the longest path to any terminal."""
height = 0
if clade.is_terminal():
height = clade.branch_length
else:
height = height + max([self._height_of(c) for c in clade.clades])
return height
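    # Usage sketch (editor's illustration; `aln` stands for a hypothetical
    # MultipleSeqAlignment):
    #
    #     constructor = DistanceTreeConstructor(DistanceCalculator('identity'),
    #                                           'nj')
    #     tree = constructor.build_tree(aln)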
# #################### Tree Scoring and Searching Classes #####################
class Scorer(object):
"""Base class for all tree scoring methods"""
def get_score(self, tree, alignment):
"""Caller to get the score of a tree for the given alignment.
This should be implemented in subclass"""
raise NotImplementedError("Method not implemented!")
class TreeSearcher(object):
"""Base class for all tree searching methods"""
def search(self, starting_tree, alignment):
"""Caller to search the best tree with a starting tree.
This should be implemented in subclass"""
raise NotImplementedError("Method not implemented!")
class NNITreeSearcher(TreeSearcher):
"""Tree searching with Nearest Neighbor Interchanges (NNI) algorithm.
:Parameters:
scorer : ParsimonyScorer
parsimony scorer to calculate the parsimony score of
different trees during NNI algorithm.
"""
def __init__(self, scorer):
if isinstance(scorer, Scorer):
self.scorer = scorer
else:
raise TypeError("Must provide a Scorer object.")
def search(self, starting_tree, alignment):
"""Implement the TreeSearcher.search method.
:Parameters:
starting_tree : Tree
starting tree of NNI method.
alignment : MultipleSeqAlignment
multiple sequence alignment used to calculate parsimony
score of different NNI trees.
"""
return self._nni(starting_tree, alignment)
def _nni(self, starting_tree, alignment):
"""Search for the best parsimony tree using the NNI algorithm."""
best_tree = starting_tree
while True:
best_score = self.scorer.get_score(best_tree, alignment)
temp = best_score
for t in self._get_neighbors(best_tree):
score = self.scorer.get_score(t, alignment)
if score < best_score:
best_score = score
best_tree = t
# stop if no smaller score exist
if best_score >= temp:
break
return best_tree
def _get_neighbors(self, tree):
"""Get all neighbor trees of the given tree.
Currently only for binary rooted trees.
"""
# make child to parent dict
parents = {}
for clade in tree.find_clades():
if clade != tree.root:
node_path = tree.get_path(clade)
# cannot get the parent if the parent is root. Bug?
if len(node_path) == 1:
parents[clade] = tree.root
else:
parents[clade] = node_path[-2]
neighbors = []
root_childs = []
for clade in tree.get_nonterminals(order="level"):
if clade == tree.root:
left = clade.clades[0]
right = clade.clades[1]
root_childs.append(left)
root_childs.append(right)
if not left.is_terminal() and not right.is_terminal():
# make changes around the left_left clade
# left_left = left.clades[0]
left_right = left.clades[1]
right_left = right.clades[0]
right_right = right.clades[1]
                    # neighbor 1 (left_left + right_right)
del left.clades[1]
del right.clades[1]
left.clades.append(right_right)
right.clades.append(left_right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (left_left + right_left)
del left.clades[1]
del right.clades[0]
left.clades.append(right_left)
right.clades.append(right_right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (left_left + left_right)
del left.clades[1]
del right.clades[0]
left.clades.append(left_right)
right.clades.insert(0, right_left)
elif clade in root_childs:
# skip root child
continue
else:
# method for other clades
# make changes around the parent clade
left = clade.clades[0]
right = clade.clades[1]
parent = parents[clade]
if clade == parent.clades[0]:
sister = parent.clades[1]
# neighbor 1 (parent + right)
del parent.clades[1]
del clade.clades[1]
parent.clades.append(right)
clade.clades.append(sister)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (parent + left)
del parent.clades[1]
del clade.clades[0]
parent.clades.append(left)
clade.clades.append(right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (parent + sister)
del parent.clades[1]
del clade.clades[0]
parent.clades.append(sister)
clade.clades.insert(0, left)
else:
sister = parent.clades[0]
# neighbor 1 (parent + right)
del parent.clades[0]
del clade.clades[1]
parent.clades.insert(0, right)
clade.clades.append(sister)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (parent + left)
del parent.clades[0]
del clade.clades[0]
parent.clades.insert(0, left)
clade.clades.append(right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (parent + sister)
del parent.clades[0]
del clade.clades[0]
parent.clades.insert(0, sister)
clade.clades.insert(0, left)
return neighbors
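    # Usage sketch (editor's illustration; `tree` is a rooted bifurcating
    # tree and `aln` an alignment whose taxon names match the tree tips):
    #
    #     searcher = NNITreeSearcher(ParsimonyScorer())
    #     best = searcher.search(tree, aln)   # hill-climb over NNI neighbors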
# ######################## Parsimony Classes ##########################
class ParsimonyScorer(Scorer):
"""Parsimony scorer with a scoring matrix.
This is a combination of Fitch algorithm and Sankoff algorithm.
See ParsimonyTreeConstructor for usage.
:Parameters:
matrix : _Matrix
scoring matrix used in parsimony score calculation.
"""
def __init__(self, matrix=None):
if not matrix or isinstance(matrix, _Matrix):
self.matrix = matrix
else:
raise TypeError("Must provide a _Matrix object.")
def get_score(self, tree, alignment):
"""Calculate and return the parsimony score given a tree and
the MSA using the Fitch algorithm without the penalty matrix
the Sankoff algorithm with the matrix"""
# make sure the tree is rooted and bifurcating
if not tree.is_bifurcating():
raise ValueError("The tree provided should be bifurcating.")
if not tree.rooted:
tree.root_at_midpoint()
# sort tree terminals and alignment
terms = tree.get_terminals()
terms.sort(key=lambda term: term.name)
alignment.sort()
if not all([t.name == a.id for t, a in zip(terms, alignment)]):
raise ValueError(
"Taxon names of the input tree should be the same with the alignment.")
# term_align = dict(zip(terms, alignment))
score = 0
for i in range(len(alignment[0])):
# parsimony score for column_i
score_i = 0
# get column
column_i = alignment[:, i]
# skip non-informative column
if column_i == len(column_i) * column_i[0]:
continue
# start calculating score_i using the tree and column_i
# Fitch algorithm without the penalty matrix
if not self.matrix:
# init by mapping terminal clades and states in column_i
clade_states = dict(zip(terms, [set([c]) for c in column_i]))
for clade in tree.get_nonterminals(order="postorder"):
clade_childs = clade.clades
left_state = clade_states[clade_childs[0]]
right_state = clade_states[clade_childs[1]]
state = left_state & right_state
if not state:
state = left_state | right_state
score_i = score_i + 1
clade_states[clade] = state
# Sankoff algorithm with the penalty matrix
else:
inf = float('inf')
# init score arrays for terminal clades
alphabet = self.matrix.names
length = len(alphabet)
clade_scores = {}
for j in range(len(column_i)):
array = [inf] * length
index = alphabet.index(column_i[j])
array[index] = 0
clade_scores[terms[j]] = array
# bottom up calculation
for clade in tree.get_nonterminals(order="postorder"):
clade_childs = clade.clades
left_score = clade_scores[clade_childs[0]]
right_score = clade_scores[clade_childs[1]]
array = []
for m in range(length):
min_l = inf
min_r = inf
for n in range(length):
sl = self.matrix[
alphabet[m], alphabet[n]] + left_score[n]
sr = self.matrix[
alphabet[m], alphabet[n]] + right_score[n]
if min_l > sl:
min_l = sl
if min_r > sr:
min_r = sr
array.append(min_l + min_r)
clade_scores[clade] = array
# minimum from root score
score_i = min(array)
# TODO: resolve internal states
score = score + score_i
return score
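    # Illustration of the Fitch step above (editor's sketch, no-matrix case):
    # intersect the child state sets; if the intersection is empty, take the
    # union and charge one substitution:
    #
    #     left, right = {'A'}, {'C'}
    #     state = (left & right) or (left | right)   # {'A', 'C'}, score_i += 1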
class ParsimonyTreeConstructor(TreeConstructor):
"""Parsimony tree constructor.
:Parameters:
searcher : TreeSearcher
tree searcher to search the best parsimony tree.
starting_tree : Tree
starting tree provided to the searcher.
Example
--------
>>> from Bio import AlignIO
>>> from TreeConstruction import *
>>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
>>> print aln
SingleLetterAlphabet() alignment with 5 rows and 13 columns
AACGTGGCCACAT Alpha
AAGGTCGCCACAC Beta
GAGATTTCCGCCT Delta
GAGATCTCCGCCC Epsilon
CAGTTCGCCACAA Gamma
>>> starting_tree = Phylo.read('Tests/TreeConstruction/nj.tre', 'newick')
>>> print tree
Tree(weight=1.0, rooted=False)
Clade(branch_length=0.0, name='Inner3')
Clade(branch_length=0.01421, name='Inner2')
Clade(branch_length=0.23927, name='Inner1')
Clade(branch_length=0.08531, name='Epsilon')
Clade(branch_length=0.13691, name='Delta')
Clade(branch_length=0.29231, name='Alpha')
Clade(branch_length=0.07477, name='Beta')
Clade(branch_length=0.17523, name='Gamma')
>>> from TreeConstruction import *
>>> scorer = ParsimonyScorer()
>>> searcher = NNITreeSearcher(scorer)
>>> constructor = ParsimonyTreeConstructor(searcher, starting_tree)
>>> pars_tree = constructor.build_tree(aln)
>>> print pars_tree
Tree(weight=1.0, rooted=True)
Clade(branch_length=0.0)
Clade(branch_length=0.197335, name='Inner1')
Clade(branch_length=0.13691, name='Delta')
Clade(branch_length=0.08531, name='Epsilon')
Clade(branch_length=0.041935, name='Inner2')
Clade(branch_length=0.01421, name='Inner3')
Clade(branch_length=0.17523, name='Gamma')
Clade(branch_length=0.07477, name='Beta')
Clade(branch_length=0.29231, name='Alpha')
"""
def __init__(self, searcher, starting_tree=None):
self.searcher = searcher
self.starting_tree = starting_tree
def build_tree(self, alignment):
"""Build the tree.
:Parameters:
alignment : MultipleSeqAlignment
multiple sequence alignment to calculate parsimony tree.
"""
# if starting_tree is none,
# create a upgma tree with 'identity' scoring matrix
if self.starting_tree is None:
dtc = DistanceTreeConstructor(DistanceCalculator("identity"),
"upgma")
self.starting_tree = dtc.build_tree(alignment)
return self.searcher.search(self.starting_tree, alignment)
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Phylo/TreeConstruction.py
|
Python
|
gpl-2.0
| 39,491
|
[
"BLAST",
"Biopython"
] |
3b7ffd5406c1236b6f67d4467a401d95988453129ec2d14975706d41702e283b
|
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
requires_core = [
'Twisted[tls]==18.9.0',
'pyOpenSSL>=16.2.0',
]
if sys.platform == 'darwin':
requires_core = [
'Twisted==18.9.0',
]
requires = requires_core + [
# Core
#'Twisted[tls]==17.1.0',
#'Twisted==17.1.0',
'six==1.14.0',
'pyramid==1.5.7', # 1.9.1
'pyramid_jinja2==2.5', # 2.7
'cornice==1.0.0', # 2.4.0, 3.1.0
'simplejson==3.8.2', # 3.13.2
'Jinja2==2.8', # 2.10
'Bunch==1.0.1',
'appdirs==1.4.3',
'json-store==2.1',
'python-dateutil>=2.6.0', # 2.7.0
'arrow==0.10.0', # 0.12.1
'funcy==1.7.2', # 1.10.1
'attrs==19.2.0',
# Bus adapters
#'twisted-mqtt==0.1.4', # 0.2.1
'paho-mqtt==1.2', # 1.4.0
'autobahn[twisted]==0.13.0', # 18.3.1
'msgpack-python==0.4.7', # 0.5.6
'PyTrie==0.3.1',
# Misc
#'setuptools==40.8.0',
'distlib==0.2.8',
'docopt==0.6.2',
# More dependencies
'cryptography>=1.3.4',
'certifi>=2016.2.28',
#'pyasn1>=0.2.3', # 0.4.2
'service_identity==18.1.0',
'idna==2.8',
'pyasn1==0.4.5',
'pyasn1-modules==0.2.4',
]
extras = {
'daq': [
'influxdb==5.3.0',
'pytz==2018.9',
'requests>=2.12.4',
#'requests>=2.18.4,<3',
'grafana_api_client==0.2.0',
#'grafana-dashboard-builder==0.1.0a7', # evaluated, but not suitable
#'txmongo==16.3.0',
'pymongo==3.4.0', # 3.6.1
],
'daq_geospatial': [
'Geohash==1.0',
'geopy==1.12.0', # 1.17.0
'Beaker==1.9.0', # 1.10.0
'tqdm==4.19.8', # 4.28.1
],
'daq_binary': [
'pycparser==2.17', # 2.18
'pyparsing==2.2.0',
'pyclibrary==0.1.3',
'tabulate==0.7.5', # 0.8.2
'sympy==0.7.6.1', # 1.1.1
],
'storage_plus': [
'alchimia==0.4', # 0.8.1
],
# Data export: Basic formats
'export': [
'pyinfluxql==0.0.1',
'pandas==0.18.1', # 0.24.5
'numpy>=1.8.2,<2', # 1.14.2
'XlsxWriter==0.9.2', # 1.0.2
],
'plotting': [
#'dyplot==0.8.8',
# sudo port install py27-matplotlib
'matplotlib==2.0.0', # 2.2.2
#'cairocffi>=0.5.4',
'bokeh==0.12.4', # 0.12.14
'vincent==0.4.4',
],
# Data export: Scientific data formats like HDF5 and NetCDF and plots from ggplot
'scientific': [
# Data
# ----
# HDF5
# "PyTables" requires HDF5 libraries
# sudo port install hdf5
'tables>=3.1.1', # 3.4.2
# NetCDF (Network Common Data Form)
'xarray==0.7.2', # 0.10.2
# sudo port install netcdf
'netCDF4>=1.2.7', # 1.3.1
#'h5netcdf==0.2.2',
# Algorithms
# ----------
# sudo port install py27-scipy
'scipy>=0.14.0', # 1.0.0
'ggplot==0.9.7', # 0.11.5
# gfortran
# aptitude install libatlas-base-dev lapack-dev gfortran or
# https://gcc.gnu.org/wiki/GFortranBinaries
# Visualization
# -------------
#'seaborn==0.7.1',
],
'firmware': [
'GitPython==2.0.5', # 2.1.8
'plumbum==1.6.1.post0', # 1.6.6
],
}
setup(name='kotori',
version='0.24.5',
description='Kotori is a data acquisition, processing and graphing toolkit for humans',
long_description=README,
license="AGPL 3, EUPL 1.2",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License v3",
"License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Web Environment",
"Framework :: Pyramid",
"Framework :: Twisted",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Information Technology",
"Intended Audience :: Manufacturing",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"Topic :: Communications",
"Topic :: Database",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: HTTP Servers",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Scientific/Engineering :: Human Machine Interfaces",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Embedded Systems",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Object Brokering",
"Topic :: System :: Archiving",
"Topic :: System :: Networking :: Monitoring",
"Topic :: Text Processing",
"Topic :: Utilities",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
author='Andreas Motl',
author_email='andreas.motl@getkotori.org',
url='https://github.com/daq-tools/kotori',
keywords='data acquisition graphing export plotting daq routing engine ' +
'mqtt http rest amqp wamp sql web html csv json cdf hdf5 png ' +
'twisted pyramid autobahn influxdb mosquitto grafana mongodb matplotlib ggplot ' +
'telemetry m2m iot',
packages=find_packages(),
include_package_data=True,
package_data={
'kotori': [
'daq/graphing/grafana/resources/*.json',
'io/export/*.html',
],
},
zip_safe=False,
test_suite='kotori.test',
install_requires=requires,
extras_require=extras,
dependency_links=[
'https://github.com/jjmalina/pyinfluxql/tarball/d92db4ab8c#egg=pyinfluxql-0.0.1',
],
entry_points={
'console_scripts': [
'kotori = kotori:run',
#'kotori-master = kotori.master.server:run',
#'kotori-node = kotori.node.nodeservice:run',
#'kotori-wamp-client = kotori.master.client:run_wamp_client',
'h2m-csv-udp-client = kotori.vendor.hydro2motion.client:run_udp_client',
'h2m-csv-udp-fuzzer = kotori.vendor.hydro2motion.client:run_udp_fuzzer',
'lst-message = kotori.vendor.lst.shell:message',
'kotori-selftest = kotori.vendor.selftest:run',
],
'paste.app_factory': [
'main = kotori.frontend.app:main',
],
},
)
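# Installation sketch (editor's note, hedged): the `extras` dict above maps
# onto pip's extras syntax, e.g. `pip install kotori` for the core and
# `pip install "kotori[daq,export]"` to pull in selected optional groups.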
|
zerotired/kotori
|
setup.py
|
Python
|
agpl-3.0
| 7,419
|
[
"NetCDF"
] |
998e8b214784b4bfe5b8b8a98875b17f470392f6dd99508d9eb757a33e64de2a
|
#!/usr/bin/env python
""" load mWGS Raw Seq Set into OSDF using info from data file """
import os
import re
from cutlass.HostTranscriptomicsRawSeqSet import HostTranscriptomicsRawSeqSet
import settings
from cutlass_utils import \
load_data, get_parent_node_id, list_tags, format_query, \
write_csv_headers, values_to_node_dict, write_out_csv, \
load_node, get_field_header, dump_args, log_it
filename = os.path.basename(__file__)
log = log_it(filename)
# the Higher-Ups
node_type = 'HostTranscriptomicsRawSeqSet'
parent_type = 'HostSeqPrep'
grand_parent_type = 'Sample'
great_parent_type = 'Visit'
great_great1_type = 'Subject'
great_great2_type = 'Study'
node_tracking_file = settings.node_id_tracking.path
class node_values:
study = ''
comment = ''
sequence_type = ''
seq_model = ''
format = ''
format_doc = ''
exp_length = ''
local_file = ''
checksums = ''
size = ''
tags = []
urls = []
def load(internal_id, search_field):
"""search for existing node to update, else create new"""
# node-specific variables:
NodeTypeName = node_type #'HostTranscriptomicsRawSeqSet'
NodeLoadFunc = 'load_host_transcriptomics_raw_seq_set'
return load_node(internal_id, search_field, NodeTypeName, NodeLoadFunc)
def validate_record(parent_id, node, record, data_file_name=node_type):
"""update record fields
validate node
if valid, save, if not, return false
"""
log.info("in validate/save: "+node_type)
csv_fieldnames = get_field_header(data_file_name)
write_csv_headers(data_file_name,fieldnames=csv_fieldnames)
node.study = 'prediabetes'
node.comment = record['host_transcriptomics_id']
node.sequence_type = 'nucleotide'
node.seq_model = ' '
node.format = 'fastq'
node.format_doc = 'https://en.wikipedia.org/wiki/FASTQ_format'
node.exp_length = 0 #record['exp_length']
    node.urls = [record['local_file_path']]  # a list; the set literal here was presumably unintended
node.checksums = {'md5':record['md5'], 'sha256':record['sha']}
node.size = int(record['SIZE'])
#node.tags = list_tags(node.tags,
# 'sample name: ' + record['visit_id'],
# 'body site: ' + record['body_site'],
# 'visit id: ' + record['visit_id'],
# 'subject id: ' + record['rand_subject_id'],
# 'file prefix: ' + record['sample_name_id'] + '.hostseqprep',
# 'file name: ' + record['local_file'],
# 'sub-group: ' + record['subtype'],
# )
parent_link = {'sequenced_from':[parent_id]}
log.debug('parent_id: '+str(parent_link))
node.links = parent_link
csv_fieldnames = get_field_header(data_file_name)
if not node.is_valid():
write_out_csv(data_file_name+'_invalid_records.csv',
fieldnames=csv_fieldnames, values=[record,])
invalidities = node.validate()
err_str = "Invalid {}!\n\t{}".format(node_type, str(invalidities))
log.error(err_str)
# raise Exception(err_str)
elif node.save():
write_out_csv(data_file_name+'_submitted.csv',
fieldnames=csv_fieldnames, values=[record,])
return node
else:
write_out_csv(data_file_name+'_unsaved_records.csv',
fieldnames=csv_fieldnames, values=[record,])
return False
def submit(data_file, id_tracking_file=node_tracking_file):
log.info('Starting submission of %ss.', node_type)
nodes = []
csv_fieldnames = get_field_header(data_file)
write_csv_headers(data_file,fieldnames=csv_fieldnames)
for record in load_data(data_file):
log.info('...next record...')
try:
log.debug('data record: '+str(record))
# node-specific variables:
load_search_field = 'comment'
internal_id = str(record['host_transcriptomics_id']) +'.host_transcriptomics'
parent_internal_id = record['host_seq_prep_name_id'] ##Link to Host_seq_prep ID
grand_parent_internal_id = record['sample_name_id'] ##Link to Sample ID
parent_id = get_parent_node_id(
id_tracking_file, parent_type, parent_internal_id)
log.debug('matched parent_id: %s', parent_id)
if parent_id:
node_is_new = False # set to True if newbie
node = load(internal_id, load_search_field)
if not getattr(node, load_search_field):
log.debug('loaded node newbie...')
node_is_new = True
                    # import pdb; pdb.set_trace()  # leftover debugging breakpoint, disabled
saved = validate_record(parent_id, node, record,
data_file_name=data_file)
if saved:
header = settings.node_id_tracking.id_fields
saved_name = getattr(saved, load_search_field)
vals = values_to_node_dict(
[[node_type.lower(),saved_name,saved.id,
parent_type.lower(),parent_internal_id,parent_id]],
header
)
nodes.append(vals)
if node_is_new:
write_out_csv(id_tracking_file,
fieldnames=get_field_header(id_tracking_file),
values=vals)
else:
log.error('No parent_id found for %s', parent_internal_id)
        except Exception as e:
log.exception(e)
raise e
return nodes
if __name__ == '__main__':
pass
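# Usage sketch (editor's illustration; the CSV file name is hypothetical):
#
#     nodes = submit('host_transcriptomics_records.csv')
#     log.info('submitted %d nodes', len(nodes))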
|
JAX-GM/osdf_submit
|
nodes/host_TranscriptomicsRawSeqSet.py
|
Python
|
gpl-3.0
| 5,813
|
[
"VisIt"
] |
9251c8a5b65f755f8cd3d7d97f7dc7a59f30162081bd9e1393e7c6e405b06c40
|
"""Configuration data for the "activation maximization" (am) module.
"""
from base import Config as BaseConfig
from dltb.base.observer import change
class Config(BaseConfig,
changes={'network_changed', 'layer_changed', 'unit_changed',
'config_changed'}):
"""Configuration data for the "activation maximization" (am) module.
"""
_config = {
'UNIT_INDEX': {
'default': 0,
'doc': 'The unit to maximize.',
'change': 'unit_changed'
},
'NETWORK_KEY': {
'default': '',
        'doc': 'The name of the network in which the unit to maximize '
               'is found. Needs to be a key that can be used to '
               'retrieve the network.',
'change': 'network_changed'
},
'LAYER_KEY': {
'default': '',
        'doc': 'The name of the layer in which the unit to maximize '
               'is found. Needs to be a key that can be used to '
               'retrieve the layer from a network object.',
'change': 'layer_changed'
},
'RANDOMIZE_INPUT': {
'default': True,
        'doc': 'maximization is initialized with random input '
               'or a flat colored image'
},
'ETA': {
'default': 500,
'doc': 'learning rate'
},
'BLUR_ACTIVATED': {
'default': True,
'doc': 'apply Gaussian blur'
},
'BLUR_KERNEL': {
'default': (3, 3),
'doc': 'Gaussian blur parameter'
},
'BLUR_SIGMA': {
'default': 0,
'doc': 'Gaussian blur parameter'
},
'BLUR_FREQUENCY': {
'default': 5,
'doc': 'how many steps between two blurs. paper used 4.'
},
'L2_ACTIVATED': {
'default': True,
'doc': 'Apply L2 decay'
},
'L2_LAMBDA': {
'default': 0.000001,
'doc': 'L2 decay parameter, totally arbitrarily chosen'
},
'NORM_CLIPPING_ACTIVATED': {
'default': False,
        'doc': 'apply low-norm pixel clipping'
},
'NORM_CLIPPING_FREQUENCY': {
'default': 25,
'doc': 'how many steps between pixel clippings'
},
'NORM_PERCENTILE': {
'default': 30,
'doc': 'how many of the pixels are clipped'
},
'CONTRIBUTION_CLIPPING_ACTIVATED': {
'default': True,
'doc': 'Apply low contribution pixel clipping'
},
'CONTRIBUTION_CLIPPING_FREQUENCY': {
'default': 50,
'doc': 'how many steps between pixel clippings'
},
'CONTRIBUTION_PERCENTILE': {
'default': 15,
'doc': 'how many of the pixels are clipped'
},
'BORDER_REG_ACTIVATED': {
'default': True,
'doc': 'Activate border regularizer: this punishes high pixels'
' the more distant they are from the image center.'
},
'BORDER_FACTOR': {
'default': 0.000003,
        'doc': 'Instead of summing up the product of the actual pixel-'
               'to-center distance and its value (too strong), '
               'the effect is softened by multiplying each of the '
               'resulting values with this factor.'
},
'BORDER_EXP': {
'default': 1.5,
'doc': 'The higher this factor, the stronger the distance '
'from center is punished.'
},
'LARGER_IMAGE': {
'default': False,
'doc': 'now called upscaling in thesis'
},
'IMAGE_SIZE_X': {
'default': 350,
'doc': ''
},
'IMAGE_SIZE_Y': {
'default': 350,
'doc': ''
},
'JITTER': {
'default': False,
'doc': 'Jittering causes the input image to be (randomly) '
'shifted by a small offset in horizontal and vertical '
'direction in every iteration. The idea is to achieve '
'a result that is invariant to such translations.'
},
'JITTER_STRENGTH': {
'default': 5,
'doc': 'The amount of jitter to use. In every iteration the '
               'amount of horizontal and vertical shift is randomly '
               'chosen from the interval '
'[-_JITTER_STRENGTH, _JITTER_STRENGTH].'
},
'WRAP_AROUND': {
'default': True,
'doc': 'Pixels beyond the image borders in '
'upscaling/jitter are updated via wrap-around'
},
'LOSS_STOP': {
'default': True,
'doc': 'Stop the maximization process when the loss goal '
'is reached. Turn this off if you want to ignore '
'the loss goal.'
},
'LOSS_GOAL': {
'default': 0.01,
'doc': 'convergence parameter. relative(!) difference '
'between loss and last 50 losses to converge to'
},
'LOSS_COUNT': {
'default': 100,
'doc': ''
},
'MAX_STEPS': {
'default': 2000,
'doc': 'how many steps to maximally take when optimizing an image'
},
'TENSORBOARD_ACTIVATED': {
'default': False,
'doc': 'switch on and off the logging of the image and loss'
},
'NORMALIZE_OUTPUT': {
'default': True,
'doc': 'whether to save output image normalized'
}
}
# FIXME[hack]: set preferred parameters for specific networks
@change
def _helper_setattr(self, name, value):
super()._helper_setattr(name, value)
if name == 'NETWORK_KEY':
if self.NETWORK_KEY == 'AlexNet':
# self.LAYER_KEY = 'fc8' # default for AlexNet
# -> does not exist ... use 'dense_3'
# "dense_3" -> "strided_slice_3:0"
# while 'xw_plus_b:0' -> "strided_slice_4:0"
self.LAYER_KEY = "dense_3"
self.UNIT_INDEX = 947
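# Usage sketch (editor's illustration; assumes Config is instantiated the
# same way as other BaseConfig subclasses in this project):
#
#     config = Config()
#     config.NETWORK_KEY = 'AlexNet'   # triggers the AlexNet defaults above
#     # now config.LAYER_KEY == 'dense_3' and config.UNIT_INDEX == 947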
|
Petr-By/qtpyvis
|
tools/am/config.py
|
Python
|
mit
| 6,307
|
[
"Gaussian"
] |
3e064d0c021c402cbf721438917541b4d5f46272925416c51b07b2e021ba175e
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
squidy =\
"""
:vzW:.
:9#@M8v:. .#MMMMMMM#A;. .:i,.
:vv. tMMMMMM.#MM@YivBMMMMMMMMMMMM@MMMM#Qbz1X
..,YEMQ, 7MMMMM#zi ... .ii..
... vEMMMb@E:c$MMMMMMMz
.i;7ZbIY:...UEBMMMMMMMMMMMM# ,MMMM.
.:zZ#MMMZMMMMMMMMMMMMMb iMMMMMMMMMMMMMMMMMMWt, YMMMMMM#
.... .:c71tt80oM@@MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM@
.ii7vt0MMMM09ZtnWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM,
...::cnWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMUMMMM@MMMMMMMc
.YUc .;o@M@ZXEM0zB@MM@MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM6
.YYtn9UIA$@6Z$W@E7:QMM0ZMMMMMMco; 1MMMMMMMMMMMMMMMMMMMMMMMMM#MMMMMMMM#
.. .t6MMMMMMMMMMMX .t0@MMMMM@I,.. iMMMMM$
;QMMMMMWS1tQMMMMM@; ibMi
.ivE@M0v. it@M@WMMM0c
.,;Y: .XE202i. .QM7
.:Yt2c. EMA
.... MM6 .:i;7z8Q#$EEz11Cv,.
#MMMMMMMMMMM@@#MMMMMMMM@@WEU7
Welcome to the Kraken distributed modelling environment
(c) Brett G. Olivier, Stellenbosch 2007-2017.
"""
octopussy =\
"""
.Yt90QW$EC.
.U@@@W8AzoI0MMW.
WMM@WEE88Z980W@MM@
MM$Q00bEbbbbEE0W$#MC
bMEbbbbbbbbb88bbE00@@
M$8bbZ8bb8bbbbbZZb0QM:
MBb8ZbbbbbbbbbbEbEEQ@U ..
M@bZ8EbbbbbbbbbbbbEE## tMMMM@Z;
7M8bbbbbbbbbbbbbbbbE@@ 6M@#@@MMMMi
.Mbb888ZbZZ88Zb8Z88bMZ iM@00#.
;M @#88bZ86WQEWW8Z88b6M, @B@M$
MM. UM88bb0#tY@,n@QZ8ZWM ;MBMB
MM@. .,i: .ME8b8Q$It@28B8886Mi . EBUZ@7
i MM@MMMMMM. MQb8Z8600b08ZbbA@1 tMM@QEM@90#M@
@ MMMM@MM#$M0 z@98b8888888bE9WW @M@@MM@$MMMM.
MS Y2,.,,,@QBMMZ0MM#@@A8bEEbZQE6ZZb@#@M .MWQ$W,...Y2.
#MI ;St YM@@MM@#$00WZ8bbb8E@@69Z#E0#M@ .##bZM. 2A, .0
E#MM; 6M@B$M XM$#BE60bZbEb98QWZ9B06Q@@MMM#M#E6IM. .8MM#..MMMMQ;CQ#
:M#@MMMM$@#Q#B 7@Q0W#@0ZZZ9ZZZA9bbM0E$EEQ#MM#B@M0MBWMMB$MMMMBB#MMM@b
.MMMMM@#0.;MW@M#@$9E0Z9Z660W9b889666WB089bQ#WB@@#M@#@#$#@@#$QEE@MMM8,.
.@M6 .MMz ,MM#BQ#@ZE@@BW##E88bb0$bUAU69b0MB622Q#7;QQb@MM1S@@@#7Q#.
. MM#@MI iY0WAb#Q8EW##QE0EZ87i80EA . . ..
Y21c. .EE8W.0#6AU#0. ;8b8ZQ#2.:U#@@@S
c@@@c $M@@@MM@09@i .Mb9EE@@$$QbEb690@M@W@M@MM
.MM@@MM06#MQ$@#WQ0EZM. 8MM .M@00#EMM@#B$@@MMMBMA
:tAWME0#@@@@B8@MMMBIW@@B MMQ QQQ0 cMMUZS. :i @Mo
.MMMMWZ6WZ.#M@W@W:E; :#i
7MBW@@$@@ ZMZt9
Welcome to the Kraken distributed modelling environment
(c) Brett G. Olivier, Stellenbosch 2007-2017.
"""
|
bgoli/pysces
|
pysces/kraken/startup.py
|
Python
|
bsd-3-clause
| 3,491
|
[
"PySCeS"
] |
082e794fa44e543ad85ca7282cde91c7addb271343b483277af122395fea7e86
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Traversing Python modules and classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.util import tf_inspect
__all__ = ['traverse']
def _traverse_internal(root, visit, stack, path):
"""Internal helper for traverse."""
# Only traverse modules and classes
if not tf_inspect.isclass(root) and not tf_inspect.ismodule(root):
return
try:
children = tf_inspect.getmembers(root)
except ImportError:
# On some Python installations, some modules do not support enumerating
# members (six in particular), leading to import errors.
children = []
new_stack = stack + [root]
visit(path, root, children)
for name, child in children:
# Do not descend into built-in modules
if tf_inspect.ismodule(
child) and child.__name__ in sys.builtin_module_names:
continue
# Break cycles
if any(child is item for item in new_stack): # `in`, but using `is`
continue
child_path = path + '.' + name if path else name
_traverse_internal(child, visit, new_stack, child_path)
def traverse(root, visit):
"""Recursively enumerate all members of `root`.
Similar to the Python library function `os.path.walk`.
Traverses the tree of Python objects starting with `root`, depth first.
Parent-child relationships in the tree are defined by membership in modules or
classes. The function `visit` is called with arguments
`(path, parent, children)` for each module or class `parent` found in the tree
of python objects starting with `root`. `path` is a string containing the name
with which `parent` is reachable from the current context. For example, if
`root` is a local class called `X` which contains a class `Y`, `visit` will be
called with `('Y', X.Y, children)`.
If `root` is not a module or class, `visit` is never called. `traverse`
never descends into built-in modules.
`children`, a list of `(name, object)` pairs are determined by
`tf_inspect.getmembers`. To avoid visiting parts of the tree, `children` can
be modified in place, using `del` or slice assignment.
Cycles (determined by reference equality, `is`) stop the traversal. A stack of
objects is kept to find cycles. Objects forming cycles may appear in
`children`, but `visit` will not be called with any object as `parent` which
is already in the stack.
  Traversing system modules can take a long time; it is advisable to pass a
  `visit` callable which blacklists such modules.
Args:
root: A python object with which to start the traversal.
visit: A function taking arguments `(path, parent, children)`. Will be
called for each object found in the traversal.
"""
_traverse_internal(root, visit, [], '')
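# Usage sketch (editor's illustration; `some_module` is a placeholder):
#
#     def visit(path, parent, children):
#         print(path or '<root>', len(children))
#         # prune in place (slice assignment), as documented above:
#         children[:] = [(n, c) for n, c in children if not n.startswith('_')]
#
#     traverse(some_module, visit)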
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/tools/common/traverse.py
|
Python
|
apache-2.0
| 3,484
|
[
"VisIt"
] |
2e19636b7d0eabc7ab0fac5813a048095eaa7d626e3e9f51bb287d73ddf18b50
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Misura Language or Mini Language.
Secure minimal Python language subset for conditional evaluation of numerical datasets."""
import ast
from traceback import print_exc, format_exc
from time import time
from .env import BaseEnvironment, ExitException
from .objenv import InterfaceEnvironment, InstrumentEnvironment, KilnEnvironment
from .validator import Validator
try:
unicode('a')
except NameError:
    # Python 3 has no builtin `unicode`; fall back to `str`.
    unicode = str
class MiLang(object):
"""Validation and execution of a script."""
whitelist = ['len', 'min', 'max', 'range', 'abs', 'print', 'isinstance']
"""Allowed callable object names."""
blacklist = ['exec', 'open', 'os', 'path', 'exit',
'sys', 'self', 'compile', 'getattr', 'setattr']
"""Forbidden callable objects."""
error = ""
"""Former compilation error."""
error_line = -1
"""Line where the error was found."""
error_col = -1
"""Column where the error begins."""
code = False
"""Codice compilato pronto all'esecuzione"""
tree = False
"""AST tree computed from the code"""
env = BaseEnvironment()
"""Data execution environment"""
obj_env = InterfaceEnvironment()
"""Output interface execution environment (sample or measurement)."""
ins_env = InstrumentEnvironment()
"""Current instrument execution environment"""
kiln_env = KilnEnvironment()
"""Kiln execution environment"""
measure_env = InterfaceEnvironment()
"""Measure metadata environment"""
script_env = InterfaceEnvironment()
"""Script originating environment (for accessing parameters, etc)"""
last = 0.
"""Last time this script was executed"""
period = 0
"""Execution period"""
handle = False
"""Handle of the option hosting this scipt"""
meta = False
"""Metadata option, where to record the output point"""
script = False
def __init__(self, script, env=False, obj_env=False, script_env=False):
if env:
self.env = env
self.prefix = getattr(env, 'prefix', '')
if obj_env:
self.obj_env = obj_env
if script_env:
self.script_env = script_env
self.set_script(script)
def __getstate__(self):
r = self.__dict__.copy()
r.pop('code')
r.pop('tree')
return r
def __setstate__(self, s):
        # map() is lazy on Python 3, so use a plain loop to actually set attrs
        for key, value in s.items():
            setattr(self, key, value)
if self.script:
self.set_script(self.script)
def set_script(self, script):
val, tree = self.validate(script)
if val:
self.script = script
self.tree = tree
self.code = compile(tree, "<string>", "exec")
return True
else:
self.code = False
self.tree = False
print('Validation failed:', self.error)
return False
def val_name(self, fname):
if fname.startswith('_') or fname.endswith('_') or fname in self.blacklist:
return False
return True
def validate(self, script):
"""Script validation. Returns True if the compilation is enabled,
False if it is forbidden."""
# self.whitelist=self.env.whitelist+self.obj_env.whitelist+self.ins_env.whitelist
whitetree = {'names': self.whitelist,
'mi': {'names': set(self.env.whitelist)},
'obj': {'names': set(self.obj_env.whitelist)},
'ins': {'names': set(self.ins_env.whitelist)},
'kiln': {'names': set(self.kiln_env.whitelist)},
'measure': {'names': set(self.measure_env.whitelist)},
'script': {'names': set(self.script_env.whitelist)}}
# Something strange passed...
        if not isinstance(script, (str, unicode)):
print('Wrong instance passed', type(script))
return False, False
c = ast.parse(script)
validator = Validator(whitetree, self.blacklist)
try:
validator.visit(c)
ok = True
except:
self.error = validator.error
self.error_line = validator.error_line
self.error_col = validator.error_col
ok = False
print_exc()
return ok, c
def set_env_outFile(self, hdf):
self.env.hdf = hdf
self.ins_env.hdf = hdf
self.obj_env.hdf = hdf
self.measure_env.hdf = hdf
self.kiln_env.hdf = hdf
self.script_env.hdf = hdf
def do(self):
"""Execute the code"""
t = time()
if (self.period is not None) and (t - self.last < self.period):
print('Not executing!')
return False
self.last = time()
self.env._reset()
mi = self.env
obj = self.obj_env
ins = self.ins_env
measure = self.measure_env
kiln = self.kiln_env
script = self.script_env
for env in (mi, obj, ins, measure, kiln, script):
env.handle = self.handle
        # Definition of the subordinate environments (sample, kiln, etc.)
for s, e in self.env.sub.items():
m = "%s=mi.%s" % (s, e)
exec(m)
if self.code:
try:
exec(self.code)
except ExitException:
return False
except:
print('Error in ', self.handle, format_exc(), obj.obj)
# FIXME
# if ins.obj:
# ins.obj.log.error('Error in %s %s %s' % (self.handle,format_exc(),obj.obj))
return False
self.env = mi
return True
else:
self.error = "Impossible to execute invalid or empty script"
return False
def eval(self, out, ins=None):
"""Execute the code and set the output on interface `out`.
Optionally make available an additional namespace ins for the calling instrument. """
self.obj_env.obj = out
self.ins_env.obj = ins
if ins is not None:
# self.set_env_outFile(ins.sharedFile)
self.set_env_outFile(ins.outFile)
if getattr(ins, 'measure', False) is not False:
self.measure_env.obj = ins.measure
if getattr(ins, 'kiln', False) is not False:
self.kiln_env.obj = ins.kiln
do = self.do()
if not do:
return False
# Output dictionary
m = {'temp': 'None', 'time': 'None', 'value': 'None'}
ok = False
for k in m.keys():
v = getattr(self.env, k)
            if v is None:
continue
m[k] = v
ok = True
if self.meta:
out[self.meta] = m
return ok
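# Usage sketch (editor's illustration; `user_script`, `out` and `ins` are
# placeholders for a Mini Language snippet and interface objects):
#
#     ml = MiLang(user_script)
#     if ml.code:                 # compiled and validated successfully
#         ok = ml.eval(out, ins)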
|
tainstr/misura.canon
|
misura/canon/milang/milang.py
|
Python
|
mit
| 6,787
|
[
"VisIt"
] |
99c7b8054c8d07c76da4b7711802756f6eba5ebafb11e8669e3c4697be7b2108
|
""" RemoveLambdas turns lambda into regular functions. """
from pythran.analyses import GlobalDeclarations, ImportedIds
from pythran.passmanager import Transformation
from pythran.tables import MODULES
from copy import copy
import ast
class _LambdaRemover(Transformation):
def __init__(self, pm, name, ctx, lambda_functions, imports):
Transformation.__init__(self)
self.passmanager = pm
self.ctx = ctx
self.prefix = name
self.lambda_functions = lambda_functions
self.imports = imports
self.global_declarations = pm.gather(GlobalDeclarations, ctx.module)
def visit_Lambda(self, node):
if MODULES['functools'] not in self.global_declarations.values():
import_ = ast.Import([ast.alias('functools', None)])
self.imports.append(import_)
self.global_declarations['functools'] = MODULES['functools']
self.generic_visit(node)
forged_name = "{0}_lambda{1}".format(
self.prefix,
len(self.lambda_functions))
ii = self.passmanager.gather(ImportedIds, node, self.ctx)
ii.difference_update(self.lambda_functions) # remove current lambdas
binded_args = [ast.Name(iin, ast.Load()) for iin in sorted(ii)]
node.args.args = ([ast.Name(iin, ast.Param()) for iin in sorted(ii)] +
node.args.args)
forged_fdef = ast.FunctionDef(
forged_name,
copy(node.args),
[ast.Return(node.body)],
[])
self.lambda_functions.append(forged_fdef)
proxy_call = ast.Name(forged_name, ast.Load())
if binded_args:
return ast.Call(
ast.Attribute(
ast.Name('functools', ast.Load()),
"partial",
ast.Load()
),
[proxy_call] + binded_args,
[],
None,
None)
else:
return proxy_call
class RemoveLambdas(Transformation):
"""
Turns lambda into top-level functions.
>>> import ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("def foo(y): lambda x:y+x")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(RemoveLambdas, node)
>>> print pm.dump(backend.Python, node)
import functools
def foo(y):
functools.partial(foo_lambda0, y)
def foo_lambda0(y, x):
return (y + x)
"""
def visit_Module(self, node):
self.lambda_functions = list()
self.imports = list()
self.generic_visit(node)
node.body = self.imports + node.body + self.lambda_functions
return node
def visit_FunctionDef(self, node):
lr = _LambdaRemover(self.passmanager, node.name, self.ctx,
self.lambda_functions, self.imports)
        node.body = list(map(lr.visit, node.body))  # list(): ast bodies must be lists; map is lazy on Python 3
return node
|
artas360/pythran
|
pythran/transformations/remove_lambdas.py
|
Python
|
bsd-3-clause
| 2,969
|
[
"VisIt"
] |
4c9f21e77d3e0a1ead003af103e81b42658bae66094324fa7af7c9c36fb423a6
|
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from nose.tools import assert_true, assert_equal
from hyperspy._signals.signal1d import Signal1D
from hyperspy.components1d import Gaussian
class TestSetParameterInModel:
def setUp(self):
g1 = Gaussian()
g2 = Gaussian()
g3 = Gaussian()
s = Signal1D(np.arange(10))
m = s.create_model()
m.append(g1)
m.append(g2)
m.append(g3)
self.g1 = g1
self.g2 = g2
self.g3 = g3
self.model = m
def test_set_parameter_in_model_not_free(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
m.set_parameters_not_free()
assert_equal(len(g1.free_parameters), 0)
assert_equal(len(g2.free_parameters), 0)
assert_equal(len(g3.free_parameters), 0)
def test_set_parameter_in_model_free(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
g1.A.free = False
g2.sigma.free = False
g3.centre.free = False
m.set_parameters_free()
assert_equal(len(g1.free_parameters), len(g1.parameters))
assert_equal(len(g2.free_parameters), len(g2.parameters))
assert_equal(len(g3.free_parameters), len(g3.parameters))
def test_set_parameter_in_model1(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
m.set_parameters_not_free([g1, g2])
assert_equal(len(g1.free_parameters), 0)
assert_equal(len(g2.free_parameters), 0)
assert_equal(len(g3.free_parameters), len(g3.parameters))
def test_set_parameter_in_model2(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
m.set_parameters_not_free()
m.set_parameters_free([g3])
assert_equal(len(g1.free_parameters), 0)
assert_equal(len(g2.free_parameters), 0)
assert_equal(len(g3.free_parameters), len(g3.parameters))
def test_set_parameter_in_model3(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
m.set_parameters_not_free(parameter_name_list=['A'])
assert_true(not g1.A.free)
assert_true(g1.sigma.free)
assert_true(g1.centre.free)
assert_true(not g2.A.free)
assert_true(g2.sigma.free)
assert_true(g2.centre.free)
assert_true(not g3.A.free)
assert_true(g3.sigma.free)
assert_true(g3.centre.free)
def test_set_parameter_in_model4(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
m.set_parameters_not_free([g2], parameter_name_list=['A'])
assert_true(g1.A.free)
assert_true(g1.sigma.free)
assert_true(g1.centre.free)
assert_true(not g2.A.free)
assert_true(g2.sigma.free)
assert_true(g2.centre.free)
assert_true(g3.A.free)
assert_true(g3.sigma.free)
assert_true(g3.centre.free)
def test_set_parameter_in_model5(self):
m = self.model
g1 = self.g1
g2 = self.g2
g3 = self.g3
m.set_parameters_not_free()
m.set_parameters_free([g1], parameter_name_list=['centre'])
assert_true(not g1.A.free)
assert_true(not g1.sigma.free)
assert_true(g1.centre.free)
assert_true(not g2.A.free)
assert_true(not g2.sigma.free)
assert_true(not g2.centre.free)
assert_true(not g3.A.free)
assert_true(not g3.sigma.free)
assert_true(not g3.centre.free)
|
vidartf/hyperspy
|
hyperspy/tests/model/test_set_parameter_state.py
|
Python
|
gpl-3.0
| 4,277
|
[
"Gaussian"
] |
462ac76563990fc1f8e7c10ec937c696c77fdc9b188408b1722a1072b60580cb
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import warnings
import numpy as np
from math import pi
import unittest
import os
from monty.os.path import which
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator, \
VoronoiNN, JmolNN, MinimumDistanceNN, OpenBabelNN, CovalentBondNN,\
MinimumOKeeffeNN, MinimumVIRENN, \
get_neighbors_of_site_with_index, site_is_of_motif_type, \
NearNeighbors, LocalStructOrderParams, BrunnerNN_reciprocal, \
BrunnerNN_real, BrunnerNN_relative, EconNN, CrystalNN, CutOffDictNN, \
Critic2NN, solid_angle
from pymatgen import Element, Molecule, Structure, Lattice
from pymatgen.util.testing import PymatgenTest
try:
import openbabel as ob
import pybel as pb
except ImportError:
pb = None
ob = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class ValenceIonicRadiusEvaluatorTest(PymatgenTest):
def setUp(self):
"""
Setup MgO rocksalt structure for testing Vacancy
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
self._mgo_valrad_evaluator = ValenceIonicRadiusEvaluator(self._mgo_uc)
def test_valences_ionic_structure(self):
valence_dict = self._mgo_valrad_evaluator.valences
for val in list(valence_dict.values()):
self.assertTrue(val in {2, -2})
def test_radii_ionic_structure(self):
radii_dict = self._mgo_valrad_evaluator.radii
for rad in list(radii_dict.values()):
self.assertTrue(rad in {0.86, 1.26})
def tearDown(self):
del self._mgo_uc
del self._mgo_valrad_evaluator
class VoronoiNNTest(PymatgenTest):
def setUp(self):
self.s = self.get_structure('LiFePO4')
self.nn = VoronoiNN(targets=[Element("O")])
self.s_sic = self.get_structure('Si')
self.s_sic["Si"] = {'Si': 0.5, 'C': 0.5}
self.nn_sic = VoronoiNN()
def test_get_voronoi_polyhedra(self):
self.assertEqual(len(self.nn.get_voronoi_polyhedra(self.s, 0).items()), 8)
def test_get_cn(self):
self.assertAlmostEqual(self.nn.get_cn(
self.s, 0, use_weights=True), 5.809265748999465, 7)
self.assertAlmostEqual(self.nn_sic.get_cn(
self.s_sic, 0, use_weights=True), 4.5381161643940668, 7)
def test_get_coordinated_sites(self):
self.assertEqual(len(self.nn.get_nn(self.s, 0)), 8)
def test_volume(self):
self.nn.targets = None
volume = 0
for n in range(len(self.s)):
for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
volume += nn['volume']
self.assertAlmostEqual(self.s.volume, volume)
def test_solid_angle(self):
self.nn.targets = None
for n in range(len(self.s)):
angle = 0
for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
angle += nn['solid_angle']
self.assertAlmostEqual(4 * np.pi, angle)
self.assertEqual(solid_angle([0,0,0], [[1,0,0],[-1,0,0],[0,1,0]]), pi)
def test_nn_shell(self):
        # First, make a SC lattice to make the math easier
s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu'], [[0, 0, 0]])
# Get the 1NN shell
self.nn.targets = None
nns = self.nn.get_nn_shell_info(s, 0, 1)
self.assertEqual(6, len(nns))
# Test the 2nd NN shell
nns = self.nn.get_nn_shell_info(s, 0, 2)
self.assertEqual(18, len(nns))
self.assertArrayAlmostEqual([1] * 6,
[x['weight'] for x in nns if
max(np.abs(x['image'])) == 2])
self.assertArrayAlmostEqual([2] * 12,
[x['weight'] for x in nns if
max(np.abs(x['image'])) == 1])
# Test the 3rd NN shell
nns = self.nn.get_nn_shell_info(s, 0, 3)
for nn in nns:
# Check that the coordinates were set correctly
self.assertArrayAlmostEqual(nn['site'].frac_coords, nn['image'])
# Test with a structure that has unequal faces
cscl = Structure(Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.1045, 2.1045, 2.1045], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.nn.weight = 'area'
nns = self.nn.get_nn_shell_info(cscl, 0, 1)
self.assertEqual(14, len(nns))
self.assertEqual(6, np.isclose([x['weight'] for x in nns],
0.125/0.32476).sum()) # Square faces
self.assertEqual(8, np.isclose([x['weight'] for x in nns], 1).sum())
nns = self.nn.get_nn_shell_info(cscl, 0, 2)
# Weight of getting back on to own site
# Square-square hop: 6*5 options times (0.125/0.32476)^2 weight each
# Hex-hex hop: 8*7 options times 1 weight each
self.assertAlmostEqual(60.4444,
np.sum([x['weight'] for x in nns if x['site_index'] == 0]),
places=3)
def test_adj_neighbors(self):
# Make a simple cubic structure
s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu'], [[0, 0, 0]])
# Compute the NNs with adjacency
self.nn.targets = None
neighbors = self.nn.get_voronoi_polyhedra(s, 0)
# Each neighbor has 4 adjacent neighbors, all orthogonal
for nn_key, nn_info in neighbors.items():
self.assertEqual(4, len(nn_info['adj_neighbors']))
for adj_key in nn_info['adj_neighbors']:
self.assertEqual(0, np.dot(nn_info['normal'], neighbors[adj_key]['normal']))
def test_all_at_once(self):
# Get all of the sites for LiFePO4
all_sites = self.nn.get_all_voronoi_polyhedra(self.s)
# Make sure they are the same as the single-atom ones
for i, site in enumerate(all_sites):
# Compute the tessellation using only one site
by_one = self.nn.get_voronoi_polyhedra(self.s, i)
            # Match the coordinates of the neighbors, as site matching does not seem to work?
all_coords = np.sort([x['site'].coords for x in site.values()], axis=0)
by_one_coords = np.sort([x['site'].coords for x in by_one.values()], axis=0)
self.assertArrayAlmostEqual(all_coords, by_one_coords)
# Test the nn_info operation
all_nn_info = self.nn.get_all_nn_info(self.s)
for i, info in enumerate(all_nn_info):
# Compute using the by-one method
by_one = self.nn.get_nn_info(self.s, i)
# Get the weights
all_weights = sorted([x['weight'] for x in info])
by_one_weights = sorted([x['weight'] for x in by_one])
self.assertArrayAlmostEqual(all_weights, by_one_weights)
def test_Cs2O(self):
"""A problematic structure in the Materials Project"""
strc = Structure([[4.358219, 0.192833, 6.406960], [2.114414, 3.815824, 6.406960],
[0.311360, 0.192833, 7.742498]],
['O', 'Cs', 'Cs'],
[[0, 0, 0], [0.264318, 0.264318, 0.264318], [0.735682, 0.735682, 0.735682]],
coords_are_cartesian=False)
# Compute the voronoi tessellation
result = VoronoiNN().get_all_voronoi_polyhedra(strc)
self.assertEqual(3, len(result))
def test_filtered(self):
nn = VoronoiNN(weight='area')
# Make a bcc crystal
bcc = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu', 'Cu'],
[[0, 0, 0], [0.5, 0.5, 0.5]], coords_are_cartesian=False)
# Compute the weight of the little face
big_face_area = np.sqrt(3) * 3 / 2 * (2 / 4 / 4)
small_face_area = 0.125
little_weight = small_face_area / big_face_area
# Run one test where you get the small neighbors
nn.tol = little_weight * 0.99
nns = nn.get_nn_info(bcc, 0)
self.assertEqual(14, len(nns))
# Run a second test where we screen out little faces
nn.tol = little_weight * 1.01
nns = nn.get_nn_info(bcc, 0)
self.assertEqual(8, len(nns))
# Make sure it works for the `get_all` operation
all_nns = nn.get_all_nn_info(bcc * [2, 2, 2])
self.assertEqual([8,]*16, [len(x) for x in all_nns])
def tearDown(self):
del self.s
del self.nn
class JmolNNTest(PymatgenTest):
def setUp(self):
self.jmol = JmolNN()
self.jmol_update = JmolNN(el_radius_updates={"Li": 1})
def test_get_nn(self):
s = self.get_structure('LiFePO4')
# Test the default near-neighbor finder.
nsites_checked = 0
for site_idx, site in enumerate(s):
if site.specie == Element("Li"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 0)
nsites_checked += 1
elif site.specie == Element("Fe"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 6)
nsites_checked += 1
elif site.specie == Element("P"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 4)
nsites_checked += 1
self.assertEqual(nsites_checked, 12)
# Test a user override that would cause Li to show up as 6-coordinated
self.assertEqual(self.jmol_update.get_cn(s, 0), 6)
# Verify get_nn function works
self.assertEqual(len(self.jmol_update.get_nn(s, 0)), 6)
def tearDown(self):
del self.jmol
del self.jmol_update
class OpenBabelNNTest(PymatgenTest):
def setUp(self):
self.benzene = Molecule.from_file(os.path.join(test_dir, "benzene.xyz"))
self.acetylene = Molecule.from_file(os.path.join(test_dir, "acetylene.xyz"))
@unittest.skipIf((not (ob and pb)) or (not which("babel")),
"OpenBabel not installed.")
def test_nn_orders(self):
strat = OpenBabelNN()
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertEqual(acetylene[0]["weight"], 3)
self.assertEqual(acetylene[1]["weight"], 1)
# Currently, benzene bonds register either as double or single,
# not aromatic
# Instead of searching for aromatic bonds, we check that bonds are
# detected in the same way from both sides
self.assertEqual(strat.get_nn_info(self.benzene, 0)[0]["weight"],
strat.get_nn_info(self.benzene, 1)[0]["weight"])
@unittest.skipIf((not (ob and pb)) or (not which("babel")),
"OpenBabel not installed.")
def test_nn_length(self):
strat = OpenBabelNN(order=False)
benzene_bonds = strat.get_nn_info(self.benzene, 0)
c_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "C"]
h_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "H"]
self.assertAlmostEqual(c_bonds[0]["weight"], 1.41, 2)
self.assertAlmostEqual(h_bonds[0]["weight"], 1.02, 2)
self.assertAlmostEqual(strat.get_nn_info(self.acetylene, 0)[0]["weight"],
1.19,
2)
def tearDown(self):
del self.benzene
del self.acetylene
class CovalentBondNNTest(PymatgenTest):
def setUp(self):
self.benzene = Molecule.from_file(os.path.join(test_dir, "benzene.xyz"))
self.acetylene = Molecule.from_file(os.path.join(test_dir, "acetylene.xyz"))
def test_nn_orders(self):
strat = CovalentBondNN()
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertEqual(acetylene[0]["weight"], 3)
self.assertEqual(acetylene[1]["weight"], 1)
benzene = strat.get_nn_info(self.benzene, 0)
self.assertAlmostEqual(benzene[0]["weight"], 1.6596, places=4)
def test_nn_length(self):
strat = CovalentBondNN(order=False)
benzene_bonds = strat.get_nn_info(self.benzene, 0)
c_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "C"]
h_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "H"]
self.assertAlmostEqual(c_bonds[0]["weight"], 1.41, 2)
self.assertAlmostEqual(h_bonds[0]["weight"], 1.02, 2)
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertAlmostEqual(acetylene[0]["weight"], 1.19, places=2)
def test_bonded_structure(self):
strat = CovalentBondNN()
benzene = strat.get_bonded_structure(self.benzene)
self.assertEqual(len(benzene.find_rings()), 1)
acetylene = strat.get_bonded_structure(self.acetylene)
self.assertEqual(len(acetylene.graph.nodes), 4)
def tearDown(self):
del self.benzene
del self.acetylene
class MiniDistNNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.105, 2.105], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.mos2 = Structure(
Lattice([[3.19, 0, 0], [-1.595, 2.763, 0], [0, 0, 17.44]]),
['Mo', 'S', 'S'], [[-1e-06, 1.842, 3.72], [1.595, 0.92, 5.29], \
[1.595, 0.92, 2.155]], coords_are_cartesian=True)
def test_all_nn_classes(self):
self.assertAlmostEqual(MinimumDistanceNN(cutoff=5, get_all_sites=True).get_cn(
self.cscl, 0), 14)
self.assertAlmostEqual(MinimumDistanceNN().get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(MinimumDistanceNN().get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(MinimumDistanceNN(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(MinimumDistanceNN(tol=0.1).get_cn(
self.mos2, 0), 6)
        for image in MinimumDistanceNN(tol=0.1).get_nn_images(self.mos2, 0):
            self.assertTrue(image in [(0, 0, 0), (0, 1, 0), (-1, 0, 0)])
self.assertAlmostEqual(MinimumOKeeffeNN(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(MinimumOKeeffeNN(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(MinimumOKeeffeNN(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(MinimumVIRENN(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(MinimumVIRENN(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(MinimumVIRENN(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(BrunnerNN_reciprocal(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(BrunnerNN_reciprocal(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(BrunnerNN_reciprocal(tol=0.01).get_cn(
self.cscl, 0), 14)
self.assertAlmostEqual(BrunnerNN_relative(tol=0.01).get_cn(
self.diamond, 0), 16)
self.assertAlmostEqual(BrunnerNN_relative(tol=0.01).get_cn(
self.nacl, 0), 18)
self.assertAlmostEqual(BrunnerNN_relative(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(BrunnerNN_real(tol=0.01).get_cn(
self.diamond, 0), 16)
self.assertAlmostEqual(BrunnerNN_real(tol=0.01).get_cn(
self.nacl, 0), 18)
self.assertAlmostEqual(BrunnerNN_real(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(EconNN(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(EconNN(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(EconNN(tol=0.01).get_cn(
self.cscl, 0), 14)
self.assertAlmostEqual(VoronoiNN(tol=0.5).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(VoronoiNN(tol=0.5).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(VoronoiNN(tol=0.5).get_cn(
self.cscl, 0), 8)
def test_get_local_order_params(self):
nn = MinimumDistanceNN()
ops = nn.get_local_order_parameters(self.diamond, 0)
self.assertAlmostEqual(ops['tetrahedral'], 0.9999934389036574)
ops = nn.get_local_order_parameters(self.nacl, 0)
self.assertAlmostEqual(ops['octahedral'], 0.9999995266669)
def tearDown(self):
del self.diamond
del self.nacl
del self.cscl
del self.mos2
class MotifIdentificationTest(PymatgenTest):
def setUp(self):
self.silicon = Structure(
Lattice.from_lengths_and_angles(
[5.47, 5.47, 5.47],
[90.0, 90.0, 90.0]),
["Si", "Si", "Si", "Si", "Si", "Si", "Si", "Si"],
[[0.000000, 0.000000, 0.500000],
[0.750000, 0.750000, 0.750000],
[0.000000, 0.500000, 1.000000],
[0.750000, 0.250000, 0.250000],
[0.500000, 0.000000, 1.000000],
[0.250000, 0.750000, 0.250000],
[0.500000, 0.500000, 0.500000],
[0.250000, 0.250000, 0.750000]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.105, 2.105], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_pyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["C", "C", "C", "C", "C", "C"], [
[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], \
[0, 0, 1]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_bipyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["P", "Cl", "Cl", "Cl", "Cl", "Cl"], [
[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0], [1.74937, -1.01, 0], \
[-1.74937, -1.01, 0], [0, 0, -2.14]], validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
def test_site_is_of_motif_type(self):
for i in range(self.diamond.num_sites):
self.assertEqual(site_is_of_motif_type(
self.diamond, i), "tetrahedral")
for i in range(self.nacl.num_sites):
self.assertEqual(site_is_of_motif_type(
self.nacl, i), "octahedral")
for i in range(self.cscl.num_sites):
self.assertEqual(site_is_of_motif_type(
self.cscl, i), "bcc")
self.assertEqual(site_is_of_motif_type(
self.square_pyramid, 0), "square pyramidal")
for i in range(1, self.square_pyramid.num_sites):
self.assertEqual(site_is_of_motif_type(
self.square_pyramid, i), "unrecognized")
self.assertEqual(site_is_of_motif_type(
self.trigonal_bipyramid, 0), "trigonal bipyramidal")
for i in range(1, self.trigonal_bipyramid.num_sites):
self.assertEqual(site_is_of_motif_type(
self.trigonal_bipyramid, i), "unrecognized")
def test_get_neighbors_of_site_with_index(self):
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.nacl, 0)), 6)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.cscl, 0)), 8)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, delta=0.01)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, cutoff=6)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="voronoi")), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="min_OKeeffe")), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="min_VIRE")), 4)
def tearDown(self):
del self.silicon
del self.diamond
del self.nacl
del self.cscl
class NearNeighborTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
def set_nn_info(self):
# check conformance
# implicitly assumes that all NearNeighbors subclasses
# will correctly identify bonds in diamond, if it
# can't there are probably bigger problems
subclasses = NearNeighbors.__subclasses__()
for subclass in subclasses:
# Critic2NN has external dependency, is tested separately
if 'Critic2' not in str(subclass):
nn_info = subclass().get_nn_info(self.diamond, 0)
self.assertEqual(nn_info[0]['site_index'], 1)
self.assertEqual(nn_info[0]['image'][0], 1)
def tearDown(self):
del self.diamond
class LocalStructOrderParamsTest(PymatgenTest):
def setUp(self):
self.single_bond = Structure(
Lattice.from_lengths_and_angles(
[10, 10, 10], [90, 90, 90]),
["H", "H", "H"], [[1, 0, 0], [0, 0, 0], [6, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.linear = Structure(
Lattice.from_lengths_and_angles(
[10, 10, 10], [90, 90, 90]),
["H", "H", "H"], [[1, 0, 0], [0, 0, 0], [2, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.bent45 = Structure(
Lattice.from_lengths_and_angles(
[10, 10, 10], [90, 90, 90]), ["H", "H", "H"],
[[0, 0, 0], [0.707, 0.707, 0], [0.707, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cubic = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]),
["H"], [[0, 0, 0]], validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=False,
site_properties=None)
self.bcc = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]),
["H", "H"], [[0, 0, 0], [0.5, 0.5, 0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.fcc = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]), ["H", "H", "H", "H"],
[[0, 0, 0], [0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.hcp = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1.633], [90, 90, 120]), ["H", "H"],
[[0.3333, 0.6667, 0.25], [0.6667, 0.3333, 0.75]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.diamond = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]), ["H", "H", "H", "H", "H", "H", "H", "H"],
[[0, 0, 0.5], [0.75, 0.75, 0.75], [0, 0.5, 0], [0.75, 0.25, 0.25],
[0.5, 0, 0], [0.25, 0.75, 0.25], [0.5, 0.5, 0.5],
[0.25, 0.25, 0.75]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.trigonal_off_plane = Structure(
Lattice.from_lengths_and_angles(
[100, 100, 100], [90, 90, 90]),
["H", "H", "H", "H"],
[[0.50, 0.50, 0.50], [0.25, 0.75, 0.25], \
[0.25, 0.25, 0.75], [0.75, 0.25, 0.25]], \
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.regular_triangle = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H"],
[[15, 15.28867, 15.65], [14.5, 15, 15], [15.5, 15, 15], \
[15, 15.866, 15]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H"],
[[15, 15.28867, 15], [14.5, 15, 15], [15.5, 15, 15], \
[15, 15.866, 15]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H"],
[[15, 15, 15], [14.75, 14.75, 15], [14.75, 15.25, 15], \
[15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H"],
[[15, 15, 15.707], [14.75, 14.75, 15], [14.75, 15.25, 15], \
[15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.T_shape = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H"],
[[15, 15, 15], [15, 15, 15.5], [15, 15.5, 15],
[15, 14.5, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H", "H"],
[[15, 15, 15], [15, 15, 15.3535], [14.75, 14.75, 15],
[14.75, 15.25, 15], [15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["Xe", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [1.17969, 0, 0], [-1.17969, 0, 0], \
[1.90877, -2.24389, 0], [-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["Xe", "F", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [0, -1.6237, 1.17969], [1.17969, 0, 0], \
[-1.17969, 0, 0], [1.90877, -2.24389, 0], \
[-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_bipyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["Xe", "F", "F", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [0, -1.6237, -1.17969], \
[0, -1.6237, 1.17969], [1.17969, 0, 0], \
[-1.17969, 0, 0], [1.90877, -2.24389, 0], \
[-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0.71, 1.2298, 0],
[-0.71, 1.2298, 0], [0.71, -1.2298, 0], [-0.71, -1.2298, 0],
[1.4199, 0, 0], [-1.4199, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), \
["H", "Li", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0, 0, 1.675], [0.71, 1.2298, 0], \
[-0.71, 1.2298, 0], [0.71, -1.2298, 0], [-0.71, -1.2298, 0], \
[1.4199, 0, 0], [-1.4199, 0, 0]], \
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_bipyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), \
["H", "Li", "Li", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0, 0, 1.675], [0, 0, -1.675], \
[0.71, 1.2298, 0], [-0.71, 1.2298, 0], \
[0.71, -1.2298, 0], [-0.71, -1.2298, 0], \
[1.4199, 0, 0], [-1.4199, 0, 0]], \
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["P", "Cl", "Cl", "Cl", "Cl"],
[[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0],
[1.74937, -1.01, 0], [-1.74937, -1.01, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_bipyramidal = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["P", "Cl", "Cl", "Cl", "Cl", "Cl"],
[[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0],
[1.74937, -1.01, 0], [-1.74937, -1.01, 0], [0, 0, -2.14]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.cuboctahedron = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H"],
[[15, 15, 15], [15, 14.5, 14.5], [15, 14.5, 15.5],
[15, 15.5, 14.5], [15, 15.5, 15.5],
[14.5, 15, 14.5], [14.5, 15, 15.5], [15.5, 15, 14.5], [15.5, 15, 15.5],
[14.5, 14.5, 15], [14.5, 15.5, 15], [15.5, 14.5, 15], [15.5, 15.5, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.see_saw_rect = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "H", "H", "H", "H"],
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0 , 0.0],
[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.sq_face_capped_trig_pris = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "H", "H", "H", "H", "H", "H", "H"],
[[0, 0, 0], [-0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
[0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
[0.0, 0.7559289460184545, 0.6546536707079771],
[-0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
[0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
[0.0, 0.7559289460184545, -0.6546536707079771], [0.0, -1.0, 0.0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
def test_init(self):
self.assertIsNotNone(
LocalStructOrderParams(["cn"], parameters=None, cutoff=0.99))
parameters = [{'norm': 2}]
lostops = LocalStructOrderParams(["cn"], parameters=parameters)
tmp = lostops.get_parameters(0)
parameters[0]['norm'] = 3
self.assertEqual(tmp, lostops.get_parameters(0))
def test_get_order_parameters(self):
# Set up everything.
op_types = ["cn", "bent", "bent", "tet", "oct", "bcc", "q2", "q4", \
"q6", "reg_tri", "sq", "sq_pyr_legacy", "tri_bipyr", "sgl_bd", \
"tri_plan", "sq_plan", "pent_plan", "sq_pyr", "tri_pyr", \
"pent_pyr", "hex_pyr", "pent_bipyr", "hex_bipyr", "T", "cuboct", \
"see_saw_rect", "hex_plan_max", "tet_max", "oct_max", "tri_plan_max", "sq_plan_max", \
"pent_plan_max", "cuboct_max", "tet_max", "sq_face_cap_trig_pris"]
op_params = [None for i in range(len(op_types))]
op_params[1] = {'TA': 1, 'IGW_TA': 1./0.0667}
op_params[2] = {'TA': 45./180, 'IGW_TA': 1./0.0667}
op_params[33] = {'TA': 0.6081734479693927, 'IGW_TA': 18.33, "fac_AA": 1.5, "exp_cos_AA": 2}
ops_044 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.44)
ops_071 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.71)
ops_087 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.87)
ops_099 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.99)
ops_101 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=1.01)
ops_501 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=5.01)
ops_voro = LocalStructOrderParams(op_types, parameters=op_params)
# Single bond.
op_vals = ops_101.get_order_parameters(self.single_bond, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 1000)
op_vals = ops_501.get_order_parameters(self.single_bond, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 799)
op_vals = ops_101.get_order_parameters(self.linear, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 0)
# Linear motif.
op_vals = ops_101.get_order_parameters(self.linear, 0)
self.assertAlmostEqual(int(op_vals[1] * 1000), 1000)
# 45 degrees-bent motif.
op_vals = ops_101.get_order_parameters(self.bent45, 0)
self.assertAlmostEqual(int(op_vals[2] * 1000), 1000)
# T-shape motif.
op_vals = ops_101.get_order_parameters(
self.T_shape, 0, indices_neighs=[1,2,3])
self.assertAlmostEqual(int(op_vals[23] * 1000), 1000)
# Cubic structure.
op_vals = ops_099.get_order_parameters(self.cubic, 0)
self.assertAlmostEqual(op_vals[0], 0.0)
self.assertIsNone(op_vals[3])
self.assertIsNone(op_vals[4])
self.assertIsNone(op_vals[5])
self.assertIsNone(op_vals[6])
self.assertIsNone(op_vals[7])
self.assertIsNone(op_vals[8])
op_vals = ops_101.get_order_parameters(self.cubic, 0)
self.assertAlmostEqual(op_vals[0], 6.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 23)
self.assertAlmostEqual(int(op_vals[4] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[5] * 1000), 333)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 763)
self.assertAlmostEqual(int(op_vals[8] * 1000), 353)
self.assertAlmostEqual(int(op_vals[28] * 1000), 1000)
# Bcc structure.
op_vals = ops_087.get_order_parameters(self.bcc, 0)
self.assertAlmostEqual(op_vals[0], 8.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 200)
self.assertAlmostEqual(int(op_vals[4] * 1000), 145)
self.assertAlmostEqual(int(op_vals[5] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
# Fcc structure.
op_vals = ops_071.get_order_parameters(self.fcc, 0)
self.assertAlmostEqual(op_vals[0], 12.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 36)
self.assertAlmostEqual(int(op_vals[4] * 1000), 78)
self.assertAlmostEqual(int(op_vals[5] * 1000), -2)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 190)
self.assertAlmostEqual(int(op_vals[8] * 1000), 574)
# Hcp structure.
op_vals = ops_101.get_order_parameters(self.hcp, 0)
self.assertAlmostEqual(op_vals[0], 12.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 33)
self.assertAlmostEqual(int(op_vals[4] * 1000), 82)
self.assertAlmostEqual(int(op_vals[5] * 1000), -26)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 97)
self.assertAlmostEqual(int(op_vals[8] * 1000), 484)
# Diamond structure.
op_vals = ops_044.get_order_parameters(self.diamond, 0)
self.assertAlmostEqual(op_vals[0], 4.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[4] * 1000), 37)
self.assertAlmostEqual(op_vals[5], 0.75)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
self.assertAlmostEqual(int(op_vals[27] * 1000), 1000)
# Trigonal off-plane molecule.
op_vals = ops_044.get_order_parameters(self.trigonal_off_plane, 0)
self.assertAlmostEqual(op_vals[0], 3.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[33] * 1000), 1000)
# Trigonal-planar motif.
op_vals = ops_101.get_order_parameters(self.trigonal_planar, 0)
self.assertEqual(int(op_vals[0] + 0.5), 3)
self.assertAlmostEqual(int(op_vals[14] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[29] * 1000 + 0.5), 1000)
# Regular triangle motif.
op_vals = ops_101.get_order_parameters(self.regular_triangle, 0)
self.assertAlmostEqual(int(op_vals[9] * 1000), 999)
# Square-planar motif.
op_vals = ops_101.get_order_parameters(self.square_planar, 0)
self.assertAlmostEqual(int(op_vals[15] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[30] * 1000 + 0.5), 1000)
# Square motif.
op_vals = ops_101.get_order_parameters(self.square, 0)
self.assertAlmostEqual(int(op_vals[10] * 1000), 1000)
# Pentagonal planar.
op_vals = ops_101.get_order_parameters(
self.pentagonal_planar.sites, 0, indices_neighs=[1,2,3,4,5])
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 126)
self.assertAlmostEqual(int(op_vals[16] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[31] * 1000 + 0.5), 1000)
# Trigonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.trigonal_pyramid, 0, indices_neighs=[1,2,3,4])
self.assertAlmostEqual(int(op_vals[18] * 1000 + 0.5), 1000)
# Square pyramid motif.
op_vals = ops_101.get_order_parameters(self.square_pyramid, 0)
self.assertAlmostEqual(int(op_vals[11] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 667)
self.assertAlmostEqual(int(op_vals[17] * 1000 + 0.5), 1000)
# Pentagonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.pentagonal_pyramid, 0, indices_neighs=[1,2,3,4,5,6])
self.assertAlmostEqual(int(op_vals[19] * 1000 + 0.5), 1000)
# Hexagonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_pyramid, 0, indices_neighs=[1,2,3,4,5,6,7])
self.assertAlmostEqual(int(op_vals[20] * 1000 + 0.5), 1000)
# Trigonal bipyramidal.
op_vals = ops_101.get_order_parameters(
self.trigonal_bipyramidal.sites, 0, indices_neighs=[1,2,3,4,5])
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 1000)
# Pentagonal bipyramidal.
op_vals = ops_101.get_order_parameters(
self.pentagonal_bipyramid.sites, 0,
indices_neighs=[1,2,3,4,5,6,7])
self.assertAlmostEqual(int(op_vals[21] * 1000 + 0.5), 1000)
# Hexagonal bipyramid motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_bipyramid, 0, indices_neighs=[1,2,3,4,5,6,7,8])
self.assertAlmostEqual(int(op_vals[22] * 1000 + 0.5), 1000)
# Cuboctahedral motif.
op_vals = ops_101.get_order_parameters(
self.cuboctahedron, 0, indices_neighs=[i for i in range(1, 13)])
self.assertAlmostEqual(int(op_vals[24] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[32] * 1000 + 0.5), 1000)
# See-saw motif.
op_vals = ops_101.get_order_parameters(
self.see_saw_rect, 0, indices_neighs=[i for i in range(1, 5)])
self.assertAlmostEqual(int(op_vals[25] * 1000 + 0.5), 1000)
# Hexagonal planar motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_planar, 0, indices_neighs=[1,2,3,4,5,6])
self.assertAlmostEqual(int(op_vals[26] * 1000 + 0.5), 1000)
# Square face capped trigonal prism.
op_vals = ops_101.get_order_parameters(
self.sq_face_capped_trig_pris, 0,
indices_neighs=[i for i in range(1, 8)])
self.assertAlmostEqual(int(op_vals[34] * 1000 + 0.5), 1000)
# Test providing explicit neighbor lists.
op_vals = ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[1])
self.assertIsNotNone(op_vals[0])
self.assertIsNone(op_vals[3])
with self.assertRaises(ValueError):
ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[2])
def tearDown(self):
del self.single_bond
del self.linear
del self.bent45
del self.cubic
del self.fcc
del self.bcc
del self.hcp
del self.diamond
del self.regular_triangle
del self.square
del self.square_pyramid
del self.trigonal_off_plane
del self.trigonal_pyramid
del self.trigonal_planar
del self.square_planar
del self.pentagonal_pyramid
del self.hexagonal_pyramid
del self.pentagonal_bipyramid
del self.T_shape
del self.cuboctahedron
del self.see_saw_rect
class CrystalNNTest(PymatgenTest):
def setUp(self):
self.lifepo4 = self.get_structure('LiFePO4')
self.lifepo4.add_oxidation_state_by_guess()
self.he_bcc = self.get_structure('He_BCC')
self.he_bcc.add_oxidation_state_by_guess()
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_sanity(self):
with self.assertRaises(ValueError):
cnn = CrystalNN()
cnn.get_cn(self.lifepo4, 0, use_weights=True)
with self.assertRaises(ValueError):
cnn = CrystalNN(weighted_cn=True)
cnn.get_cn(self.lifepo4, 0, use_weights=False)
def test_discrete_cn(self):
cnn = CrystalNN()
cn_array = []
expected_array = [6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]
for idx, _ in enumerate(self.lifepo4):
cn_array.append(cnn.get_cn(self.lifepo4, idx))
self.assertSequenceEqual(cn_array, expected_array)
def test_weighted_cn(self):
cnn = CrystalNN(weighted_cn=True)
cn_array = []
        expected_array = [5.863, 5.8716, 5.863, 5.8716, 5.7182, 5.7182, 5.719,
                          5.7181, 3.991, 3.991, 3.991, 3.9907, 3.5997, 3.525,
                          3.4133, 3.4714, 3.4727, 3.4133, 3.525, 3.5997,
                          3.5997, 3.525, 3.4122, 3.4738, 3.4728, 3.4109,
                          3.5259, 3.5997]
for idx, _ in enumerate(self.lifepo4):
cn_array.append(cnn.get_cn(self.lifepo4, idx, use_weights=True))
self.assertArrayAlmostEqual(expected_array, cn_array, 2)
def test_weighted_cn_no_oxid(self):
cnn = CrystalNN(weighted_cn=True)
cn_array = []
expected_array = [5.8962, 5.8996, 5.8962, 5.8996, 5.7195, 5.7195,
5.7202, 5.7194, 4.0012, 4.0012, 4.0012, 4.0009,
3.3897, 3.2589, 3.1218, 3.1914, 3.1914, 3.1218,
3.2589, 3.3897, 3.3897, 3.2589, 3.1207, 3.1924,
3.1915, 3.1207, 3.2598, 3.3897]
s = self.lifepo4.copy()
s.remove_oxidation_states()
for idx, _ in enumerate(s):
cn_array.append(cnn.get_cn(s, idx, use_weights=True))
self.assertArrayAlmostEqual(expected_array, cn_array, 2)
def test_fixed_length(self):
cnn = CrystalNN(fingerprint_length=30)
nndata = cnn.get_nn_data(self.lifepo4, 0)
self.assertEqual(len(nndata.cn_weights), 30)
self.assertEqual(len(nndata.cn_nninfo), 30)
def test_cation_anion(self):
cnn = CrystalNN(weighted_cn=True, cation_anion=True)
self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True),
5.8630, 2)
def test_x_diff_weight(self):
cnn = CrystalNN(weighted_cn=True, x_diff_weight=0)
self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True),
5.8630, 2)
def test_noble_gas_material(self):
cnn = CrystalNN()
self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 0)
cnn = CrystalNN(distance_cutoffs=(1.25, 5))
self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 8)
def test_shifted_sites(self):
cnn = CrystalNN()
sites = [[0., 0.2, 0.2], [0, 0, 0]]
struct = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ['I'] * len(sites), sites)
bonded_struct = cnn.get_bonded_structure(struct)
sites_shifted = [[1., 0.2, 0.2], [0, 0, 0]]
struct_shifted = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ['I'] * len(sites_shifted),
sites_shifted)
bonded_struct_shifted = cnn.get_bonded_structure(struct_shifted)
self.assertEqual(len(bonded_struct.get_connected_sites(0)),
len(bonded_struct_shifted.get_connected_sites(0)))
class CutOffDictNNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
["C", "C"], [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
coords_are_cartesian=True
)
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_cn(self):
nn = CutOffDictNN({('C', 'C'): 2})
self.assertEqual(nn.get_cn(self.diamond, 0), 4)
nn_null = CutOffDictNN()
self.assertEqual(nn_null.get_cn(self.diamond, 0), 0)
def test_from_preset(self):
nn = CutOffDictNN.from_preset("vesta_2019")
self.assertEqual(nn.get_cn(self.diamond, 0), 4)
# test error thrown on unknown preset
self.assertRaises(ValueError, CutOffDictNN.from_preset, "test")
@unittest.skipIf(not which('critic2'), "critic2 executable not present")
class Critic2NNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
["C", "C"], [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
coords_are_cartesian=True
)
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_cn(self):
nn = Critic2NN()
#self.assertEqual(nn.get_cn(self.diamond, 0), 4)
if __name__ == '__main__':
unittest.main()
|
blondegeek/pymatgen
|
pymatgen/analysis/tests/test_local_env.py
|
Python
|
mit
| 50,916
|
[
"CRYSTAL",
"Jmol",
"Pybel",
"pymatgen"
] |
18089fea593597846ca96c816da185a6153eff0b2407862b73e48ee31c5cc4dc
|
import sys, os
from ShapeAnalysisModule import ShapeAnalysisModuleWrapper
from ConfigParser import SafeConfigParser
if len(sys.argv) != 2:
print("\n\nUsage: ./SlicerSALT --no-main-window --python-script " + sys.argv[0] + " path/to/SPHARM-PDM/parameters/file\n\n")
sys.exit(1)
if not os.path.exists(sys.argv[1]):
print "\n\nThe SPHARM-PDM parameters file " + sys.argv[1] + " doesn't exit!\n\n"
sys.exit(1)
parser = SafeConfigParser()
parser.read(sys.argv[1])
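# Illustrative layout of the parameters file (a sketch inferred from the
# parser.get() calls below; section and option names are the ones this script
# actually reads, the values shown are hypothetical):
#
#   [DirectoryPath]
#   inputDirectoryPath = /path/to/input
#   outputDirectoryPath = /path/to/output
#
#   [SegPostProcess]
#   rescale = True
#   space = 0.5,0.5,0.5
#   label = 1
#   gauss = False
#   var = 10,10,10
#
#   [GenParaMesh]
#   iter = 1000
#
#   [ParaToSPHARMMesh]
#   subdivLevel = 10
#   spharmDegree = 15
#   medialMesh = False
#   thetaIteration = 100
#   phiIteration = 100
#   regParaTemplateFileOn = False
#   regParaTemplate =
#   flipTemplateOn = False
#   flipTemplate =
#   flip = 0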
inputDirectoryPath = parser.get('DirectoryPath', 'inputDirectoryPath')
if not os.path.exists(inputDirectoryPath):
print "/!\ The input directory: " + inputDirectoryPath + " doesn't exit! /!\ "
sys.exit(1)
outputDirectoryPath = parser.get('DirectoryPath', 'outputDirectoryPath')
if not os.path.exists(outputDirectoryPath):
print "/!\ The output directory path: " + outputDirectoryPath + " doesn't exit! /!\ "
sys.exit(1)
if parser.get('SegPostProcess', 'rescale') == 'True' or parser.get('SegPostProcess', 'rescale') == 'true':
RescaleSegPostProcess = True
elif parser.get('SegPostProcess', 'rescale') == 'False' or parser.get('SegPostProcess', 'rescale') == 'false':
RescaleSegPostProcess = False
else:
print "/!\ The paremeter 'rescale' can only take as value True or False /!\ "
sys.exit(1)
sx = float(parser.get('SegPostProcess', 'space').split(',')[0])
if sx < 0 or sx > 1:
print "/!\ The paremeter 'space' for the x direction can only be a float between 0 and 1 /!\ "
sys.exit(1)
sy = float(parser.get('SegPostProcess', 'space').split(',')[1])
if sy < 0 or sy > 1:
print "/!\ The paremeter 'space' for the y direction can only be a float between 0 and 1 /!\ "
sys.exit(1)
sz = float(parser.get('SegPostProcess', 'space').split(',')[2])
if sz < 0 or sz > 1:
print "/!\ The paremeter 'space' for the z direction can only be a float between 0 and 1 /!\ "
sys.exit(1)
if int(parser.get('SegPostProcess', 'label')) < 0 or int(parser.get('SegPostProcess', 'label')) > 100:
print "/!\ The paremeter 'label' can only be a integer between 0 and 100 /!\ "
sys.exit(1)
labelNumber = int(parser.get('SegPostProcess', 'label'))
if parser.get('SegPostProcess', 'gauss') == 'True' or parser.get('SegPostProcess', 'gauss') == 'true':
GaussianFiltering = True
elif parser.get('SegPostProcess', 'gauss') == 'False' or parser.get('SegPostProcess', 'gauss') == 'false':
GaussianFiltering = False
else:
print "/!\ The paremeter 'gauss' can only take as value True or False /!\ "
sys.exit(1)
VarianceX = int(parser.get('SegPostProcess', 'var').split(',')[0])
if VarianceX < 0 or VarianceX > 100:
print "/!\ The paremeter 'var' for the x direction can only be a integer between 0 and 100 /!\ "
sys.exit(1)
VarianceY = int(parser.get('SegPostProcess', 'var').split(',')[1])
if VarianceY < 0 or VarianceY > 100:
print "/!\ The paremeter 'var' for the y direction can only be a integer between 0 and 100 /!\ "
sys.exit(1)
VarianceZ = int(parser.get('SegPostProcess', 'var').split(',')[2])
if VarianceZ < 0 or VarianceZ > 100:
print "/!\ The paremeter 'var' for the z direction can only be a integer between 0 and 100 /!\ "
sys.exit(1)
if int(parser.get('GenParaMesh', 'iter')) < 0 or int(parser.get('GenParaMesh', 'iter')) > 10000:
print "/!\ The paremeter 'iter' can only be a integer between 0 and 10 000 /!\ "
sys.exit(1)
numberofIterations = int(parser.get('GenParaMesh', 'iter'))
if int(parser.get('ParaToSPHARMMesh', 'subdivLevel')) < 0 or int(parser.get('ParaToSPHARMMesh', 'subdivLevel')) > 100:
print "/!\ The paremeter 'subdivLevel' can only be a integer between 0 and 100 /!\ "
sys.exit(1)
SubdivLevelValue = int(parser.get('ParaToSPHARMMesh', 'subdivLevel'))
if int(parser.get('ParaToSPHARMMesh', 'spharmDegree')) < 0 or int(parser.get('ParaToSPHARMMesh', 'spharmDegree')) > 100:
print "/!\ The paremeter 'spharmDegree' can only be a integer between 0 and 100 /!\ "
sys.exit(1)
SPHARMDegreeValue = int(parser.get('ParaToSPHARMMesh', 'spharmDegree'))
if parser.get('ParaToSPHARMMesh', 'medialMesh') == 'True' or parser.get('ParaToSPHARMMesh', 'medialMesh') == 'true':
medialMesh = True
elif parser.get('ParaToSPHARMMesh', 'medialMesh') == 'False' or parser.get('ParaToSPHARMMesh', 'medialMesh') == 'false':
medialMesh = False
else:
print "/!\ The paremeter 'gauss' can only take as value True or False /!\ "
sys.exit(1)
if int(parser.get('ParaToSPHARMMesh', 'thetaIteration')) < 0 or int(parser.get('ParaToSPHARMMesh', 'thetaIteration')) > 21474836:
print "/!\ The paremeter 'spharmDegree' can only be a integer between 0 and 21474836 /!\ "
sys.exit(1)
thetaIterationValue = int(parser.get('ParaToSPHARMMesh', 'thetaIteration'))
if int(parser.get('ParaToSPHARMMesh', 'phiIteration')) < 0 or int(parser.get('ParaToSPHARMMesh', 'phiIteration')) > 21474836:
print "/!\ The paremeter 'spharmDegree' can only be a integer between 0 and 21474836 /!\ "
sys.exit(1)
phiIterationValue = int(parser.get('ParaToSPHARMMesh', 'phiIteration'))
if parser.get('ParaToSPHARMMesh', 'regParaTemplateFileOn') == 'True' or parser.get('ParaToSPHARMMesh', 'regParaTemplateFileOn') == 'true':
useRegTemplate = True
regTemplate = parser.get('ParaToSPHARMMesh', 'regParaTemplate')
if not os.path.exists(regTemplate):
print "/!\ The registration template: " + regTemplate + " doesn't exit! /!\ "
sys.exit(1)
if not regTemplate.endswith('.vtk') and not regTemplate.endswith('.vtp'):
print "/!\ The registration template: " + regTemplate + " is not a VTK file or VTP file! /!\ "
sys.exit(1)
elif parser.get('ParaToSPHARMMesh', 'regParaTemplateFileOn') == 'False' or parser.get('ParaToSPHARMMesh', 'regParaTemplateFileOn') == 'false':
useRegTemplate = False
regTemplate = " "
else:
print "/!\ The paremeter 'regParaTemplateFileOn' can only take as value True or False /!\ "
sys.exit(1)
if parser.get('ParaToSPHARMMesh', 'flipTemplateOn') == 'True' or parser.get('ParaToSPHARMMesh', 'flipTemplateOn') == 'true':
useFlipTemplate = True
flipTemplate = parser.get('ParaToSPHARMMesh', 'flipTemplate')
if not os.path.exists(flipTemplate):
print "/!\ The flip template: " + flipTemplate + " doesn't exit! /!\ "
sys.exit(1)
if not flipTemplate.endswith('.coef'):
print "/!\ The flip template: " + regTemplate + " is not a COEF file! /!\ "
sys.exit(1)
elif parser.get('ParaToSPHARMMesh', 'flipTemplateOn') == 'False' or parser.get('ParaToSPHARMMesh', 'flipTemplateOn') == 'false':
useFlipTemplate = False
flipTemplate = " "
else:
print "/!\ The paremeter 'flipTemplateOn' can only take as value True or False /!\ "
sys.exit(1)
if int(parser.get('ParaToSPHARMMesh', 'flip')) < 0 or int(parser.get('ParaToSPHARMMesh', 'flip')) > 8:
print "/!\ The paremeter 'flip' can only be a integer between 0 and 8 /!\ "
sys.exit(1)
choiceOfFlip = int(parser.get('ParaToSPHARMMesh', 'flip'))
ShapeAnalysisModuleInstance = ShapeAnalysisModuleWrapper(inputDirectoryPath, outputDirectoryPath,
RescaleSegPostProcess, sx, sy, sz, labelNumber,
GaussianFiltering, VarianceX, VarianceY, VarianceZ,
numberofIterations,
SubdivLevelValue, SPHARMDegreeValue,
medialMesh, thetaIterationValue, phiIterationValue,
useRegTemplate, regTemplate,
useFlipTemplate, flipTemplate, choiceOfFlip)
ShapeAnalysisModuleInstance.startProcessing()
|
bpaniagua/SPHARM-PDM
|
CommandLineTool/SPHARM-PDM.py
|
Python
|
apache-2.0
| 7,907
|
[
"VTK"
] |
fe1f09fd6c2cdadf73c76a0ca705b5d9786e2a36b88f74e2df6054b8fc6ad4f6
|
"""Define search indexes for hs_core module."""
# NOTE: this has been optimized for the current and future discovery pages.
# Features that are not used have been commented out temporarily
from haystack import indexes
from hs_core.models import BaseResource
from datetime import datetime
from nameparser import HumanName
import probablepeople
from django.conf import settings
import logging
import re
# # SOLR extension needs to be installed for the following to work
# from haystack.utils.geo import Point
adjacent_caps = re.compile("[A-Z][A-Z]")
def remove_whitespace(thing):
    trantab = str.maketrans("", "", " \t\r\n")
    return str(thing).translate(trantab)
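# Illustrative behavior (hypothetical input):
#   remove_whitespace(" 2012- 01-\t05 ") -> "2012-01-05"
# i.e. every space, tab, carriage return, and newline is deleted.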
def normalize_name(name):
"""
Normalize a name for sorting and indexing.
This uses two powerful python libraries for differing reasons.
`probablepeople` contains a discriminator between company and person names.
This is used to determine whether to parse into last, first, middle or to
leave the name alone.
However, the actual name parser in `probablepeople` is unnecessarily complex,
so that strings that it determines to be human names are parsed instead by
the simpler `nameparser`.
"""
sname = name.strip() # remove leading and trailing spaces
# Recognizer tends to mistake concatenated initials for Corporation name.
# Pad potential initials with spaces before running recognizer
# For any character A-Z followed by "." and another character A-Z, add a space after the first.
# (?=[A-Z]) means to find A-Z after the match string but not match it.
nname = re.sub("(?P<thing>[A-Z]\\.)(?=[A-Z])", "\\g<thing> ", sname)
try:
# probablepeople doesn't understand utf-8 encoding. Hand it pure unicode.
_, type = probablepeople.tag(nname) # discard parser result
except probablepeople.RepeatedLabelError: # if it can't understand the name, it's foreign
type = 'Unknown'
if type == 'Corporation':
return sname # do not parse and reorder company names
# special case for capitalization: flag as corporation
    if adjacent_caps.match(sname):
return sname
# treat anything else as a human name
nameparts = HumanName(nname)
normalized = ""
if nameparts.last:
normalized = nameparts.last
if nameparts.suffix:
if not normalized:
normalized = nameparts.suffix
else:
normalized = normalized + ' ' + nameparts.suffix
if normalized:
normalized = normalized + ','
if nameparts.title:
if not normalized:
normalized = nameparts.title
else:
normalized = normalized + ' ' + nameparts.title
if nameparts.first:
if not normalized:
normalized = nameparts.first
else:
normalized = normalized + ' ' + nameparts.first
if nameparts.middle:
if not normalized:
normalized = nameparts.middle
else:
normalized = ' ' + normalized + ' ' + nameparts.middle
return normalized.strip()
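# Illustrative outputs (the person case assumes probablepeople tags the input
# as a human name; both inputs are hypothetical):
#   normalize_name("John A. Smith") -> "Smith, John A."
#   normalize_name("NASA")          -> "NASA"  (adjacent capitals: kept as-is)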
def get_content_types(res):
""" return a set of content types matching extensions in a resource.
These include content types of logical files, as well as the generic
content types 'Document', 'Spreadsheet', 'Presentation'.
This is only meaningful for Generic or Composite resources.
"""
resource = res.get_content_model() # enable full logical file interface
types = set([res.discovery_content_type]) # accumulate high-level content types.
missing_exts = set() # track unmapped file extensions
all_exts = set() # track all file extensions
# categorize logical files by type, and files without a logical file by extension.
for f in resource.files.all():
# collect extensions of files
path = f.short_path
path = path.split(".") # determine last extension
if len(path) > 1:
ext = path[len(path)-1]
if len(ext) <= 5: # skip obviously non-MIME extensions
all_exts.add(ext.lower())
else:
ext = None
else:
ext = None
if f.has_logical_file:
candidate_type = type(f.logical_file).get_discovery_content_type()
types.add(candidate_type)
else:
if ext is not None:
missing_exts.add(ext.lower())
# categorize common extensions that are not part of logical files.
for ext_type in settings.DISCOVERY_EXTENSION_CONTENT_TYPES:
if missing_exts & settings.DISCOVERY_EXTENSION_CONTENT_TYPES[ext_type]:
types.add(ext_type)
missing_exts -= settings.DISCOVERY_EXTENSION_CONTENT_TYPES[ext_type]
if missing_exts: # if there is anything left over, then mark as Generic
types.add('Generic Data')
return (types, missing_exts, all_exts)
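# get_content_types returns a triple: (content_types, unmapped_extensions,
# all_extensions). Illustrative shape only -- the concrete names depend on the
# resource and on settings.DISCOVERY_EXTENSION_CONTENT_TYPES:
#   ({<discovery_content_type>, 'Document'}, set(), {'pdf'})
# where the first element always includes res.discovery_content_type.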
def discoverable(thing):
""" return True if the string given is discoverable information, False if not """
if thing is not None and thing.strip() != "Unknown" and thing.strip() != "":
return True
return False
class BaseResourceIndex(indexes.SearchIndex, indexes.Indexable):
"""Define base class for resource indexes."""
text = indexes.CharField(document=True, use_template=True, stored=False)
short_id = indexes.CharField(model_attr='short_id')
doi = indexes.CharField(model_attr='doi', null=True, stored=False)
author = indexes.FacetCharField() # normalized to last, first, middle
author_lower = indexes.FacetCharField() # normalized to last, first, middle in lower case
author_url = indexes.CharField(indexed=False, null=True)
title = indexes.FacetCharField() # so that sorting isn't tokenized
title_lower = indexes.FacetCharField() # so that sorting isn't tokenized
abstract = indexes.CharField()
creator = indexes.FacetMultiValueField()
contributor = indexes.FacetMultiValueField()
subject = indexes.FacetMultiValueField()
availability = indexes.FacetMultiValueField()
shareable = indexes.BooleanField()
# TODO: We might need more information than a bool in the future
replaced = indexes.BooleanField(stored=False)
created = indexes.DateTimeField(model_attr='created')
modified = indexes.DateTimeField(model_attr='last_updated')
organization = indexes.MultiValueField(stored=False)
publisher = indexes.CharField(stored=False)
coverage = indexes.MultiValueField(indexed=False)
coverage_type = indexes.MultiValueField()
# TODO: these are duplicated in the coverage field.
east = indexes.FloatField(null=True)
north = indexes.FloatField(null=True)
northlimit = indexes.FloatField(null=True)
eastlimit = indexes.FloatField(null=True)
southlimit = indexes.FloatField(null=True)
westlimit = indexes.FloatField(null=True)
start_date = indexes.DateField(null=True)
end_date = indexes.DateField(null=True)
storage_type = indexes.CharField(stored=False)
# TODO: SOLR extension needs to be installed for these to work
# coverage_point = indexes.LocationField(null=True)
# coverage_southwest = indexes.LocationField(null=True)
# coverage_northeast = indexes.LocationField(null=True)
format = indexes.MultiValueField(stored=False)
identifier = indexes.MultiValueField(stored=False)
language = indexes.CharField(stored=False)
relation = indexes.MultiValueField(stored=False)
resource_type = indexes.FacetCharField()
content_type = indexes.FacetMultiValueField()
content_exts = indexes.FacetMultiValueField()
comment = indexes.MultiValueField(stored=False)
owner_login = indexes.MultiValueField(stored=False)
owner = indexes.FacetMultiValueField()
person = indexes.MultiValueField(stored=False)
# non-core metadata
geometry_type = indexes.CharField(stored=False)
field_name = indexes.CharField(stored=False)
field_type = indexes.CharField(stored=False)
field_type_code = indexes.CharField(stored=False)
variable = indexes.MultiValueField(stored=False)
variable_type = indexes.MultiValueField(stored=False)
variable_shape = indexes.MultiValueField(stored=False)
variable_descriptive_name = indexes.MultiValueField(stored=False)
variable_speciation = indexes.MultiValueField(stored=False)
site = indexes.MultiValueField(stored=False)
method = indexes.MultiValueField(stored=False)
quality_level = indexes.MultiValueField(stored=False)
data_source = indexes.MultiValueField(stored=False)
sample_medium = indexes.MultiValueField(stored=False)
units = indexes.MultiValueField(stored=False)
units_type = indexes.MultiValueField(stored=False)
absolute_url = indexes.CharField(indexed=False)
# extra metadata
extra = indexes.MultiValueField(stored=False)
def get_model(self):
"""Return BaseResource model."""
return BaseResource
def index_queryset(self, using=None):
"""Return queryset including discoverable (and public) resources."""
candidates = self.get_model().objects.filter(raccess__discoverable=True)
show = [x.short_id for x in candidates if x.show_in_discover]
# this must return a queryset; this inefficient method is the best I can do
return self.get_model().objects.filter(short_id__in=show)
def prepare_created(self, obj):
return obj.created.strftime('%Y-%m-%dT%H:%M:%SZ')
def prepare_modified(self, obj):
return obj.last_updated.strftime('%Y-%m-%dT%H:%M:%SZ')
def prepare_title(self, obj):
"""Return metadata title if exists, otherwise return 'none'."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.title is not None and \
obj.metadata.title.value is not None:
return obj.metadata.title.value.lstrip()
else:
return 'none'
def prepare_title_lower(self, obj):
result = self.prepare_title(obj)
return result.lower()
def prepare_abstract(self, obj):
"""Return metadata abstract if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.description is not None and \
obj.metadata.description.abstract is not None:
return obj.metadata.description.abstract.lstrip()
else:
return None
# TODO: it is confusing that the "author" is the first "creator"
def prepare_author(self, obj):
"""
Return first creator if exists, otherwise return empty list.
This must be represented as a single-value field to enable sorting.
"""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.creators is not None:
first_creator = obj.metadata.creators.filter(order=1).first()
if first_creator is None:
return 'none'
elif first_creator.name:
normalized = normalize_name(first_creator.name)
return normalized
elif first_creator.organization:
return first_creator.organization.strip()
else:
return 'none'
else:
return 'none'
def prepare_author_lower(self, obj):
result = self.prepare_author(obj)
return result.lower()
def prepare_author_url(self, obj):
"""
Return metadata author description url if exists, otherwise return None.
This field is stored but not indexed, to avoid hitting the Django database during response.
"""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.creators is not None:
first_creator = obj.metadata.creators.filter(order=1).first()
if first_creator is not None and first_creator.description is not None:
return first_creator.description
else:
return None
else:
return None
def prepare_creator(self, obj):
"""
Return metadata creators if they exist, otherwise return empty array.
This field can have multiple values
"""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.creators is not None:
return [normalize_name(creator.name)
for creator in obj.metadata.creators.all()
.exclude(name__isnull=True).exclude(name='')]
else:
return []
def prepare_contributor(self, obj):
"""
Return metadata contributors if they exist, otherwise return empty array.
This field can have multiple values. Contributors include creators.
"""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.contributors is not None:
output1 = [normalize_name(contributor.name)
for contributor in obj.metadata.contributors.all()
.exclude(name__isnull=True).exclude(name='')]
return list(set(output1)) # eliminate duplicates
else:
return []
def prepare_subject(self, obj):
"""
Return metadata subjects if they exist, otherwise return empty array.
This field can have multiple values.
"""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.subjects is not None:
return [subject.value.strip() for subject in obj.metadata.subjects.all()
.exclude(value__isnull=True)]
else:
return []
def prepare_organization(self, obj):
"""
Return metadata organization if it exists, otherwise return empty array.
"""
organizations = []
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.creators is not None:
for creator in obj.metadata.creators.all():
                if creator.organization is not None:
organizations.append(creator.organization.strip())
return organizations
def prepare_publisher(self, obj):
"""
        Return metadata publisher if it exists; otherwise return None.
"""
if hasattr(obj, 'metadata') and obj.metadata is not None:
publisher = obj.metadata.publisher
if publisher is not None:
return str(publisher).lstrip()
else:
return None
else:
return None
def prepare_availability(self, obj):
"""
availability is published, public, or discoverable
To make faceting work properly, all flags that are True are represented.
"""
options = []
if hasattr(obj, 'raccess'):
if obj.raccess.published:
options.append('published')
elif obj.raccess.public:
options.append('public')
elif obj.raccess.discoverable:
options.append('discoverable')
else:
options.append('private')
else:
options.append('private')
return options
def prepare_shareable(self, obj):
""" used in depicting results """
return obj.raccess.shareable
def prepare_replaced(self, obj):
"""Return True if 'isReplacedBy' attribute exists, otherwise return False."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.relations is not None:
return obj.metadata.relations.filter(type='isReplacedBy').exists()
else:
return False
def prepare_coverage(self, obj):
"""Return resource coverage if exists, otherwise return empty array."""
# TODO: reject empty coverages
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
return [coverage._value.strip() for coverage in obj.metadata.coverages.all()]
else:
return []
def prepare_coverage_type(self, obj):
"""
Return resource coverage types if exists, otherwise return empty array.
This field can have multiple values.
"""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
return [coverage.type.strip() for coverage in obj.metadata.coverages.all()]
else:
return []
# TODO: THIS IS SIMPLY THE WRONG WAY TO DO THINGS.
# Should use geopy Point and Haystack LocationField throughout,
# instead of encoding limits literally.
# See http://django-haystack.readthedocs.io/en/v2.6.0/spatial.html
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_east(self, obj):
"""Return resource coverage east bound if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
for coverage in obj.metadata.coverages.all():
if coverage.type == 'point':
return float(coverage.value["east"])
# TODO: this returns the box center, not the extent
# TODO: probably better to call this something different.
elif coverage.type == 'box':
return (float(coverage.value["eastlimit"]) +
float(coverage.value["westlimit"])) / 2
else:
return None
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_north(self, obj):
"""Return resource coverage north bound if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
for coverage in obj.metadata.coverages.all():
if coverage.type == 'point':
return float(coverage.value["north"])
# TODO: This returns the box center, not the extent
elif coverage.type == 'box':
return (float(coverage.value["northlimit"]) +
float(coverage.value["southlimit"])) / 2
else:
return None
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_northlimit(self, obj):
"""Return resource coverage north limit if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
# TODO: does not index properly if there are multiple coverages of the same type.
for coverage in obj.metadata.coverages.all():
if coverage.type == 'box':
return coverage.value["northlimit"]
else:
return None
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_eastlimit(self, obj):
"""Return resource coverage east limit if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
# TODO: does not index properly if there are multiple coverages of the same type.
for coverage in obj.metadata.coverages.all():
if coverage.type == 'box':
return coverage.value["eastlimit"]
else:
return None
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_southlimit(self, obj):
"""Return resource coverage south limit if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
# TODO: does not index properly if there are multiple coverages of the same type.
for coverage in obj.metadata.coverages.all():
if coverage.type == 'box':
return coverage.value["southlimit"]
else:
return None
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_westlimit(self, obj):
"""Return resource coverage west limit if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
# TODO: does not index properly if there are multiple coverages of the same type.
for coverage in obj.metadata.coverages.all():
if coverage.type == 'box':
return coverage.value["westlimit"]
else:
return None
# TODO: time coverages do not specify timezone, and timezone support is active.
# TODO: Why aren't time coverages specified as Django DateTime objects?
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_start_date(self, obj):
"""Return resource coverage start date if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
for coverage in obj.metadata.coverages.all():
if coverage.type == 'period':
clean_date = coverage.value["start"][:10]
start_date = ""
if "/" in clean_date:
parsed_date = clean_date.split("/")
if len(parsed_date) == 3:
start_date = parsed_date[2] + '-' + parsed_date[0] + '-' + parsed_date[1]
elif "-" in clean_date:
parsed_date = clean_date.split("-")
if len(parsed_date) == 3:
start_date = parsed_date[0] + '-' + parsed_date[1] + '-' + parsed_date[2]
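                    # Illustrative: "01/05/2012" (assumed MM/DD/YYYY) and
                    # "2012-01-05" both normalize to "2012-01-05" here.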
start_date = remove_whitespace(start_date) # no embedded spaces
try:
start_date_object = datetime.strptime(start_date, '%Y-%m-%d')
except ValueError:
logger = logging.getLogger(__name__)
logger.error("invalid start date {} in resource {}".format(obj.short_id,
start_date))
return None
return start_date_object
else:
return None
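# Hedged example (comment only, added for illustration): a period coverage
# start value of "01/31/2019T00:00:00" is truncated to "01/31/2019" by the
# [:10] slice above, reordered to "2019-01-31", and then parsed with
# datetime.strptime("2019-01-31", '%Y-%m-%d').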
# TODO: time coverages do not specify timezone, and timezone support is active.
# TODO: Why aren't time coverages specified as Django DateTime objects?
# TODO: If there are multiple coverage objects with the same type, only first is returned.
def prepare_end_date(self, obj):
"""Return resource coverage end date if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.coverages is not None:
for coverage in obj.metadata.coverages.all():
if coverage.type == 'period' and 'end' in coverage.value:
clean_date = coverage.value["end"][:10]
end_date = ""
if "/" in clean_date:
parsed_date = clean_date.split("/")
if len(parsed_date) == 3:
end_date = parsed_date[2] + '-' + parsed_date[0] + '-' + parsed_date[1]
else:
parsed_date = clean_date.split("-")
if len(parsed_date) == 3:
end_date = parsed_date[0] + '-' + parsed_date[1] + '-' + parsed_date[2]
end_date = remove_whitespace(end_date) # no embedded spaces
try:
end_date_object = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
logger = logging.getLogger(__name__)
logger.error("invalid end date {} in resource {}".format(end_date,
obj.short_id))
return None
return end_date_object
else:
return None
def prepare_storage_type(self, obj):
return obj.storage_type
def prepare_format(self, obj):
"""Return metadata formats if metadata exists, otherwise return empty array."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.formats is not None:
return [format.value.strip() for format in obj.metadata.formats.all()]
else:
return []
def prepare_identifier(self, obj):
"""Return metadata identifiers if metadata exists, otherwise return empty array."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.identifiers is not None:
return [identifier.name.strip() for identifier in obj.metadata.identifiers.all()]
else:
return []
def prepare_language(self, obj):
"""Return resource language if exists, otherwise return None."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.language is not None:
return obj.metadata.language.code.strip()
else:
return None
def prepare_relation(self, obj):
"""Return resource relations if exists, otherwise return empty array."""
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.relations is not None:
return [relation.value.strip() for relation in obj.metadata.relations.all()]
else:
return []
def prepare_resource_type(self, obj):
"""Resource type is verbose_name attribute of obj argument."""
return obj.verbose_name
def prepare_content_type(self, obj):
""" register content types for both logical files and some MIME types """
if obj.verbose_name == 'Composite Resource' or \
obj.verbose_name == 'Generic Resource':
output = get_content_types(obj)[0]
return list(output)
else:
return [obj.discovery_content_type]
def prepare_content_exts(self, obj):
""" index by file extension """
output = get_content_types(obj)[2]
return output
def prepare_comment(self, obj):
"""Return list of all comments on resource."""
return [comment.comment.strip() for comment in obj.comments.all()]
def prepare_owner_login(self, obj):
"""Return list of usernames that have ownership access to resource."""
if hasattr(obj, 'raccess'):
return [owner.username for owner in obj.raccess.owners.all()]
else:
return []
# TODO: should utilize name from user profile rather than from User field
def prepare_owner(self, obj):
"""Return list of names of resource owners."""
names = []
if hasattr(obj, 'raccess'):
for owner in obj.raccess.owners.all():
name = normalize_name(owner.first_name + ' ' + owner.last_name)
names.append(name)
return names
# TODO: should utilize name from user profile rather than from User field
def prepare_person(self, obj):
"""Return list of normalized names of resource contributors and owners."""
output0 = []
output1 = []
output2 = []
if hasattr(obj, 'raccess'):
for owner in obj.raccess.owners.all():
name = normalize_name(owner.first_name + ' ' + owner.last_name)
output0.append(name)
if hasattr(obj, 'metadata') and \
obj.metadata is not None and \
obj.metadata.creators is not None:
output1 = [normalize_name(creator.name)
for creator in obj.metadata.creators.all()
.exclude(name__isnull=True).exclude(name='')]
output2 = [normalize_name(contributor.name)
for contributor in obj.metadata.contributors.all()
.exclude(name__isnull=True).exclude(name='')]
return list(set(output0 + output1 + output2)) # eliminate duplicates
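# Hedged note (comment only, added for illustration): because owner, creator
# and contributor names are all passed through normalize_name and merged via
# set(), a person who is both an owner and a creator appears only once in the
# "person" facet.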
# TODO: These should probably be multi-value fields and pick up all types.
def prepare_geometry_type(self, obj):
"""
Return geometry type if metadata exists, otherwise return None.
TODO: there can be multiples of these now.
"""
for f in obj.geofeaturelogicalfile_set.all():
geometry_info = f.metadata.geometryinformation
if geometry_info is not None:
return geometry_info.geometryType
return None
def prepare_field_name(self, obj):
"""
Return metadata field name if exists, otherwise return None.
TODO: there can be multiples of these now.
"""
for f in obj.geofeaturelogicalfile_set.all():
field_info = f.metadata.fieldinformations.all().first()
if field_info is not None and field_info.fieldName is not None:
return field_info.fieldName.strip()
return None
def prepare_field_type(self, obj):
"""
Return metadata field type if exists, otherwise return None.
TODO: there can be multiples of these now.
"""
for f in obj.geofeaturelogicalfile_set.all():
field_info = f.metadata.fieldinformations.all().first()
if field_info is not None and field_info.fieldType is not None:
return field_info.fieldType.strip()
return None
def prepare_field_type_code(self, obj):
"""
Return metadata field type code if exists, otherwise return None.
"""
for f in obj.geofeaturelogicalfile_set.all():
field_info = f.metadata.fieldinformations.all().first()
if field_info is not None and field_info.fieldTypeCode is not None:
return field_info.fieldTypeCode.strip()
return None
def prepare_variable(self, obj):
"""
Return metadata variable names if exists, otherwise return empty array.
"""
variables = set()
for f in obj.netcdflogicalfile_set.all():
for v in f.metadata.variables.all():
if discoverable(v.name):
variables.add(v.name.strip())
for f in obj.timeserieslogicalfile_set.all():
for v in f.metadata.variables:
# TODO: inconsistent use of variable code and variable name
if discoverable(v.variable_name):
variables.add(v.variable_name.strip())
for f in obj.reftimeserieslogicalfile_set.all():
for v in f.metadata.variables:
# TODO: inconsistent use of variable code and variable name
if discoverable(v.name):
variables.add(v.name.strip())
for f in obj.georasterlogicalfile_set.all():
for b in f.metadata.bandInformations:
if discoverable(b.variableName):
variables.add(b.variableName)
return list(variables)
def prepare_variable_type(self, obj):
"""
Return metadata variable types if exists, otherwise return empty array.
Variable type does not exist for referenced time series files.
TODO: Deprecated. Not particularly useful as a search locator.
"""
variable_types = set()
for f in obj.netcdflogicalfile_set.all():
for v in f.metadata.variables.all():
if discoverable(v.type):
variable_types.add(v.type.strip())
for f in obj.timeserieslogicalfile_set.all():
for v in f.metadata.variables:
if discoverable(v.variable_type):
variable_types.add(v.variable_type.strip())
return list(variable_types)
def prepare_variable_shape(self, obj):
"""
Return metadata variable shapes if exists, otherwise return empty array.
Shape only exists for NetCDF resources.
"""
variable_shapes = set()
for f in obj.netcdflogicalfile_set.all():
for v in f.metadata.variables.all():
if discoverable(v.shape):
variable_shapes.add(v.shape.strip())
return list(variable_shapes)
def prepare_variable_descriptive_name(self, obj):
"""
Return metadata variable descriptive names if exists, otherwise return empty array.
TODO: Deprecated. This is empty for all resources and should be deleted.
"""
return []
def prepare_variable_speciation(self, obj):
"""
Return metadata variable speciations if exists, otherwise return empty array.
Speciation only exists for the time series file type.
"""
variable_speciations = set()
for f in obj.timeserieslogicalfile_set.all():
for v in f.metadata.variables:
if discoverable(v.speciation):
variable_speciations.add(v.speciation.strip())
return list(variable_speciations)
def prepare_site(self, obj):
"""
Return list of sites if exists, otherwise return empty array.
Sites only exist for time series.
TODO: inconsistent use of site name and site code
"""
sites = set()
for f in obj.timeserieslogicalfile_set.all():
for s in f.metadata.sites:
if discoverable(s.site_name):
sites.add(s.site_name.strip())
for f in obj.reftimeserieslogicalfile_set.all():
for s in f.metadata.sites:
if discoverable(s.name):
sites.add(s.name.strip())
return list(sites)
def prepare_method(self, obj):
"""
Return list of methods if exists, otherwise return empty array.
Methods only exist for time series and referenced time series.
"""
methods = set()
for f in obj.timeserieslogicalfile_set.all():
for s in f.metadata.methods:
if discoverable(s.method_description):
methods.add(s.method_description.strip())
for f in obj.reftimeserieslogicalfile_set.all():
for s in f.metadata.methods:
if discoverable(s.description):
methods.add(s.description.strip())
return list(methods)
def prepare_quality_level(self, obj):
"""
Return list of quality levels if exists, otherwise return empty array.
TODO: Deprecated. No longer present in data.
"""
return []
def prepare_data_source(self, obj):
"""
Return list of data sources if exists, otherwise return empty array.
TODO: Deprecated: doesn't seem to exist any more.
"""
return []
def prepare_sample_medium(self, obj):
"""
Return list of sample mediums if exists, otherwise return empty array.
Sample mediums only exist for time-series types.
"""
mediums = set()
for f in obj.timeserieslogicalfile_set.all():
for v in f.metadata.time_series_results:
if discoverable(v.sample_medium):
mediums.add(v.sample_medium.strip())
for f in obj.reftimeserieslogicalfile_set.all():
for v in f.metadata.sample_mediums:
if discoverable(v):
mediums.add(v.strip())
return list(mediums)
def prepare_units(self, obj):
"""
Return list of units names if exists, otherwise return empty array.
Match both units name and units type in this field.
TODO: Seriously consider blurring the distinction between units and variables during discovery.
"""
units = set()
for f in obj.timeserieslogicalfile_set.all():
for v in f.metadata.time_series_results:
# TODO: inconsistent use of units name and units type
if discoverable(v.units_name):
units.add(v.units_name.strip())
return list(units)
def prepare_units_type(self, obj):
"""
Return list of units types if exists, otherwise return empty array.
TODO: Deprecated. In future, use "units" to refer to name and type.
"""
units_types = set()
for f in obj.timeserieslogicalfile_set.all():
for v in f.metadata.time_series_results:
if discoverable(v.units_type):
units_types.add(v.units_type.strip())
return list(units_types)
def prepare_absolute_url(self, obj):
"""Return absolute URL of object."""
return obj.get_absolute_url()
def prepare_extra(self, obj):
""" For extra metadata, include both key and value """
extra = []
for key, value in list(obj.extra_metadata.items()):
extra.append(key + ': ' + value)
return extra
|
hydroshare/hydroshare
|
hs_core/search_indexes.py
|
Python
|
bsd-3-clause
| 38,075
|
[
"NetCDF"
] |
17123efc2c139c83d35a377f876785be07a34777f0c7516d4e8b9163405ff7e1
|
"""Small library for working with MinION data
"""
from __future__ import print_function, division
import sys
import os
import pysam
import h5py
import subprocess
import re
import numpy as np
from itertools import islice, izip
from random import shuffle
from motif import getMotif
from serviceCourse.sequenceTools import reverse_complement
from serviceCourse.parsers import read_fasta
from serviceCourse.file_handlers import FolderHandler
# Globals
NORM_DIST_PARAMS = 2
NB_MODEL_PARAMS = 5
def parse_fofn(fofn_file):
files = []
with open(fofn_file, "r") as fH:
for l in fH:
files.append(l.strip())
assert len(files) > 0, "parse_fofn: error, didn't find any files in file of files {}".format(fofn_file)
return files
def kmer_iterator(dna, k):
for i in xrange(len(dna)):
kmer = dna[i:(i + k)]
if len(kmer) == k:
yield kmer
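def _kmer_iterator_example():
    # Hedged usage sketch (added for illustration; not part of the original
    # library): kmer_iterator yields every full-length kmer, left to right.
    assert list(kmer_iterator("ACGTA", 3)) == ["ACG", "CGT", "GTA"]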
def write_fasta(id, sequence, destination):
print(">", id, sep="", end="\n", file=destination)
print(sequence, end="\n", file=destination)
destination.close()
return
def cull_fast5_files(path_to_files, maximum_files):
# list of alignment files
fast5s = [x for x in os.listdir(path_to_files) if x.endswith(".fast5")]
fast5s = [path_to_files + x for x in fast5s]
if len(fast5s) == 0 or fast5s is None:
print("[cull_fast5_files] : error culling .fast5 files")
sys.exit(1)
# take only some
if maximum_files < len(fast5s):
shuffle(fast5s)
fast5s = fast5s[:maximum_files]
return fast5s
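def _cull_fast5_files_example():
    # Hedged usage sketch (added for illustration; the directory path is
    # hypothetical). Note that path_to_files must end with a slash, since
    # cull_fast5_files concatenates it directly with each file name.
    reads = cull_fast5_files("/data/run1/reads/", maximum_files=100)
    assert len(reads) <= 100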
def get_bwa_index(reference, dest, output=None):
bwa = Bwa(reference)
bwa.build_index(dest, output=output)
bwa_ref_index = dest + "temp_bwaIndex"
return bwa_ref_index
def prepareOneD(fast5, npRead_path, oneD_read_path):
out_file = open(npRead_path, "w")
read_file = open(oneD_read_path, "w")
npRead = NanoporeRead(fast5, False)
ok = npRead.write_npRead(out_file=out_file)
if not ok:
npRead.close()
read_file.close()
out_file.close()
return False, None, False
write_fasta(id=fast5, sequence=npRead.template_read, destination=read_file)
version = npRead.version
read_file.close()
out_file.close()
npRead.close()
return True, version, False
def get_npRead_2dseq_and_models(fast5, npRead_path, twod_read_path):
"""process a MinION .fast5 file into a npRead file for use with signalAlign also extracts
the 2D read into fasta format
"""
# setup
out_file = open(npRead_path, 'w')
temp_fasta = open(twod_read_path, "w")
# load MinION read
npRead = NanoporeRead(fast5, True)
# only working with 2D reads right now
if npRead.has2D_alignment_table is False:
npRead.close()
return False, None, False
proceed = npRead.write_npRead(out_file=out_file)
if proceed:
# make the 2d read
write_fasta(id=fast5, sequence=npRead.alignment_table_sequence, destination=temp_fasta)
if npRead.complement_model_id == "complement_median68pA_pop1.model":
pop1_complement = True
else:
pop1_complement = False
version = npRead.version
npRead.close()
return True, version, pop1_complement
else:
npRead.close()
print("problem making npRead for {fast5}".format(fast5=fast5), file=sys.stderr)
return False, None, False
def parse_substitution_file(substitution_file):
fH = open(substitution_file, 'r')
line = fH.readline().split()
forward_sub = line[0]
forward_pos = map(np.int64, line[1:])
line = fH.readline().split()
backward_sub = line[0]
backward_pos = map(np.int64, line[1:])
return (forward_sub, forward_pos), (backward_sub, backward_pos)
def parse_substitution_file2(substitution_file):
assert os.path.isfile(substitution_file), "[parse_substitution_file2]Didn't find "\
"substitution_file here {}".format(substitution_file)
# keep track of the contig header, so we can group the substitution positions by header
positions = {}
header = None
with open(substitution_file, 'r') as fH:
for line in fH:
line = line.split()
this_header = line[0]
if header != this_header: # we're at a new entry
positions[this_header] = {"forward": map(np.int64, line[1:])}
header = this_header
continue
if header == this_header: # we're at the backward positions
positions[header]["backward"] = map(np.int64, line[1:])
continue
return positions
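# Hedged sketch (comment only, added for illustration) of the expected input
# layout: two lines per contig, forward positions first, backward second:
#   chr1 10 42 77
#   chr1 11 43 78
# would be parsed into
#   {"chr1": {"forward": [10, 42, 77], "backward": [11, 43, 78]}}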
def process_reference_fasta(fasta, work_folder, motif_key=None, sub_char=None):
"""loops over all of the contigs in the reference file, writes the forward and backward sequences
as flat files (no headers or anything) for signalMachine, returns a dict that has the sequence
names as keys and the paths to the processed sequence as keys
"""
ref_sequence_map = {}
for header, comment, sequence in read_fasta(fasta):
# the motif label allows us to make multiple copies of the reference with unique file names
motif_lab = "" if motif_key is None else "%s." % motif_key
# these are the paths to the flat files that have the references
fw_path = work_folder.add_file_path("%s%s.%s.forward.txt" % (motif_lab, header, sub_char))
bw_path = work_folder.add_file_path("%s%s.%s.backward.txt" % (motif_lab, header, sub_char))
# signalAlign likes uppercase
if motif_key is not None:
motif = getMotif(motif_key, sequence)
fw_sequence = motif.forwardSubstitutedSequence(sub_char)
bw_sequence = motif.complementSubstitutedSequence(sub_char)
else:
fw_sequence = sequence.upper()
bw_sequence = reverse_complement(fw_sequence, reverse=False, complement=True)
with open(fw_path, 'w') as fH:
print(fw_sequence, end='\n', file=fH)
with open(bw_path, 'w') as fH:
print(bw_sequence, end='\n', file=fH)
ref_sequence_map[header] = {"forward": fw_path, "backward": bw_path}
return ref_sequence_map
def make_temp_sequence(fasta, sequence_outfile, rc_sequence_outfile):
"""extract the sequence from a fasta and put into a simple file that is used by signalAlign
"""
assert not os.path.isfile(sequence_outfile), "[make_temp_sequence]ERROR: forward file already exists"
assert not os.path.isfile(rc_sequence_outfile), "[make_temp_sequence]ERROR: backward file already exists"
for header, comment, sequence in read_fasta(fasta):
print(sequence, end='\n', file=open(sequence_outfile, 'w'))
complement_sequence = reverse_complement(sequence, reverse=False, complement=True)
print(complement_sequence, end='\n', file=open(rc_sequence_outfile, 'w'))
break
def add_ambiguity_chars_to_reference(input_fasta, substitution_file, sequence_outfile, rc_sequence_outfile,
degenerate_type, sub_char="X"):
def check_substitution(position, seq):
sub_out = seq[position]
if sub_out == sub_char:
return True # no change
if sub_char == "I" and sub_out != "A":
return False
if (sub_char == "E" or sub_char == "O") and sub_out != "C":
return False
if sub_char == "X":
if degenerate_type is None:
return False
if sub_out == "C" and degenerate_type not in ["cytosine2", "cytosine3"]:
return False
if sub_out == "A" and degenerate_type not in ["adenosine"]:
return False
return True
assert os.path.isfile(input_fasta), "ERROR: Didn't find reference FASTA {}".format(input_fasta)
assert os.path.isfile(substitution_file), "ERROR: Didn't find substitution file {}".format(substitution_file)
assert (not os.path.isfile(sequence_outfile)), "ERROR: forward file already exists"
assert (not os.path.isfile(rc_sequence_outfile)), "ERROR: backward file already exists"
# get the first sequence from the FASTA
seq = ""
for header, comment, sequence in read_fasta(input_fasta):
seq += sequence
break
# we want the complement, not the reverse complement; the sequence is flipped around later
r_seq = reverse_complement(dna=seq, reverse=False, complement=True)
# turn the sequence into a list so we can change the nucleotides
seq = list(seq)
r_seq = list(r_seq)
# parse the substitution file
f, b = parse_substitution_file(substitution_file=substitution_file)
forward_pos = f[1]
backward_pos = b[1]
for position in forward_pos:
ok = check_substitution(position=position, seq=seq)
assert ok, "substitution not allowed"
seq[position] = sub_char
for position in backward_pos:
ok = check_substitution(position=position, seq=r_seq)
assert ok, "substitution not allowed"
r_seq[position] = sub_char
# make them back into strings
seq = ''.join(seq)
r_seq = ''.join(r_seq)
# write to files
print(seq, end='\n', file=open(sequence_outfile, "w"))
print(r_seq, end='\n', file=open(rc_sequence_outfile, "w"))
return
def parse_cigar(cigar_string, ref_start):
assert(cigar_string is not None), "ERROR got cigar {}".format(cigar_string)
assert(ref_start is not None)
# use a regular expression to parse the string into operations and lengths
cigar_tuples = re.findall(r'([0-9]+)([MIDNSHPX=])', cigar_string)
clipping = {"S", "H"}
alignment_operations = {"M", "I", "D"}
# make some counters
query_start = 0
past_start = False
query_end = 0
reference_start = ref_start - 1 # fence posts adjustment
reference_end = 0
exonerated_cigar = " ".join(["%s %i" % (operation, int(length)) for length, operation in
cigar_tuples if operation in alignment_operations])
# this is how you calculate the reference map region
for length, op in cigar_tuples:
if op in clipping and past_start is False:
query_start += int(length)
if op == "M" or op == "D":
reference_end += int(length)
if past_start is False:
past_start = True
if op == "M" or op == "I":
query_end += int(length)
if past_start is False:
past_start = True
query_end = query_end + query_start
reference_end = reference_end + reference_start
return query_start, query_end, reference_start, reference_end, exonerated_cigar
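def _parse_cigar_example():
    # Hedged usage sketch (added for illustration): a 5-base soft clip
    # followed by 10M, 2D, 3M, starting at 1-based reference position 100.
    q_start, q_end, r_start, r_end, exonerated = parse_cigar("5S10M2D3M", 100)
    assert (q_start, q_end) == (5, 18)    # clipped bases excluded from the query start
    assert (r_start, r_end) == (99, 114)  # 0-based start; M and D consume reference
    assert exonerated == "M 10 D 2 M 3"   # clips dropped, exonerate-style spacing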
def exonerated_bwa(bwa_index, query, target_regions=None):
# align with bwa
command = "bwa mem -x ont2d {index} {query}".format(index=bwa_index, query=query)
# this is a small SAM file that comes from bwa
nul = open(os.devnull, 'w')
aln = subprocess.check_output(command.split(), stderr=nul)
aln = aln.split("\t") # split
nul.close()
query_start, query_end, reference_start, reference_end, cigar_string = parse_cigar(aln[11], int(aln[9]))
strand = ""
if int(aln[7]) == 16:
# todo redo this swap
strand = "-"
temp = reference_start
reference_start = reference_end
reference_end = temp
if int(aln[7]) == 0:
strand = "+"
elif int(aln[7]) != 0 and int(aln[7]) != 16:
print("unknown alignment flag, exiting", file=sys.stderr)
return False, False
completeCigarString = "cigar: %s %i %i + %s %i %i %s 1 %s" % (
aln[6].split()[-1], query_start, query_end, aln[8], reference_start, reference_end, strand, cigar_string)
if target_regions is not None:
keep = target_regions.check_aligned_region(reference_start, reference_end)
if keep is False:
return False, False
else:
pass
return completeCigarString, strand
def exonerated_bwa_pysam(bwa_index, query, temp_sam_path, target_regions=None):
# type: (str, str, str, TargetRegions)
"""Aligns the read sequence with BWA to get the guide alignment,
returns the CIGAR (in exonerate format), the strand (plus or minus) and the
contig mapped to if the read aligned. Returns (False, False, False) if there
is a problem with any of the steps or if the read maps to a region not included
within TargetRegions
"""
# align with bwa
ok = Bwa.align(bwa_index=bwa_index, query=query, output_sam_path=temp_sam_path)
if not ok:
return False, False, False
sam = pysam.Samfile(temp_sam_path, 'r')
n_aligned_segments = 0
query_name, flag, reference_name, reference_pos, sam_cigar = None, None, None, None, None
for aligned_segment in sam:
if not aligned_segment.is_secondary and not aligned_segment.is_unmapped:
if n_aligned_segments == 0:
query_name = aligned_segment.qname
flag = aligned_segment.flag
reference_name = sam.getrname(aligned_segment.rname)
reference_pos = aligned_segment.pos + 1 # pysam gives the 0-based leftmost start
sam_cigar = aligned_segment.cigarstring
n_aligned_segments += 1
if n_aligned_segments == 0:
print("[exonerated_bwa_pysam]Read has no aligned segments")
return False, False, False
if sam_cigar is None:
print("[exonerated_bwa_pysam]DEBUG: query name: {qn} flag {fl} reference name {rn} "
"reference pos {rp} sam cigar {cig} n_aligned {nal}"
"".format(qn=query_name, fl=flag, rn=reference_name, rp=reference_pos, cig=sam_cigar,
nal=n_aligned_segments))
if n_aligned_segments > 1:
print("[exonerated_bwa_pysam]WARNING more than 1 mapping, taking the first one heuristically")
query_start, query_end, reference_start, reference_end, cigar_string = parse_cigar(sam_cigar, reference_pos)
strand = ""
assert(flag is not None), "[exonerated_bwa_pysam] ERROR flag is None"
if int(flag) == 16:
strand = "-"
temp = reference_start
reference_start = reference_end
reference_end = temp
if int(flag) == 0:
strand = "+"
elif int(flag) != 0 and int(flag) != 16:
print("[exonerated_bwa_pysam]ERROR unexpected alignment flag {flag}, not continuing with signal alignment"
" for {query}".format(flag=flag, query=query_name), file=sys.stderr)
return False, False, False
assert(reference_name is not None), "[exonerated_bwa_pysam] ERROR reference_name is None"
assert(query_name is not None), "[exonerated_bwa_pysam] ERROR query_name is None"
completeCigarString = "cigar: %s %i %i + %s %i %i %s 1 %s" % (
query_name, query_start, query_end, reference_name, reference_start, reference_end, strand, cigar_string)
if target_regions is not None:
keep = target_regions.check_aligned_region(reference_start, reference_end)
if keep is False:
print("[exonerated_bwa_pysam]Read does not map witin the target regions, passing "
"on signal-level alignment", file=sys.stderr)
return False, False, False
else:
pass
return completeCigarString, strand, reference_name
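def _exonerated_bwa_pysam_example():
    # Hedged usage sketch (added for illustration; all paths are hypothetical
    # and a bwa index must already exist at the given prefix).
    cigar, strand, contig = exonerated_bwa_pysam(bwa_index="/tmp/idx/temp_bwaIndex",
                                                 query="/tmp/read.fa",
                                                 temp_sam_path="/tmp/read.sam")
    if cigar is not False:
        assert strand in ("+", "-")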
def default_template_model_from_version(version):
supported_versions = ["1.15.0", "1.19.0", "1.20.0", "1.22.2", "1.22.4", "1.23.0"]
assert version in supported_versions, "got version {}".format(version)
version_index = supported_versions.index(version)
if version_index <= 2:
r7_3_default_template_model = "../models/testModelR73_acegot_template.model"
assert os.path.exists(r7_3_default_template_model), "Didn't find default template R7.3 model"
return r7_3_default_template_model
elif version_index == 5:
r94_default_template_model = "../models/testModelR9p4_acegt_template.model"
assert os.path.exists(r94_default_template_model), "Didn't find default R9.4 model"
return r94_default_template_model
else:
r9_default_template_model = "../models/testModelR9_template.model"
assert os.path.exists(r9_default_template_model), "Didn't find default template R9 model"
return r9_default_template_model
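def _default_template_model_example():
    # Hedged sketch (added for illustration): version strings map to model
    # families -- indices 0-2 (1.15.0/1.19.0/1.20.0) select the R7.3 model,
    # 1.23.0 selects R9.4, and the remaining versions fall back to R9. The
    # helper also asserts that the model file exists on disk.
    path = default_template_model_from_version("1.22.2")
    assert path.endswith("testModelR9_template.model")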
def default_complement_model_from_version(version, pop1_complement=False):
supported_versions = ["1.15.0", "1.19.0", "1.20.0", "1.22.2", "1.22.4"]
assert version in supported_versions, "got version {}".format(version)
version_index = supported_versions.index(version)
if version_index <= 2:
r7_3_default_complement_model = "../models/testModelR73_acegot_complement.model" if not pop1_complement \
else "../models/testModelR9_complement_pop2.model"
assert os.path.exists(r7_3_default_complement_model), "Didn't find default complement R7.3 model"
return r7_3_default_complement_model
else:
r9_default_complement_model = "../models/testModelR9_complement.model"
assert os.path.exists(r9_default_complement_model), "Didn't find default complement R9 model"
return r9_default_complement_model
def degenerate_enum(degenerate_request_string):
degenerate_type = {
"cytosine2": 0,
"cytosine3": 1,
"adenosine": 2,
"variant": 3,
}
assert (degenerate_request_string in degenerate_type.keys()), "Requested degenerate nucleotide set not recognized."
return degenerate_type[degenerate_request_string]
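def _degenerate_enum_example():
    # Hedged usage sketch (added for illustration): request strings map to
    # the integer flags handed to signalMachine via the -o option.
    assert degenerate_enum("cytosine2") == 0
    assert degenerate_enum("variant") == 3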
class TargetRegions(object):
def __init__(self, tsv, already_sorted=False):
assert(os.stat(tsv).st_size != 0), "Empty regions file"
self.region_array = np.loadtxt(tsv, usecols=(0, 1), dtype=np.int32)
if len(self.region_array.shape) == 1:
a = np.empty([1, 2], dtype=np.int32)
a[0] = self.region_array
self.region_array = a
if not already_sorted:
self.region_array = np.sort(self.region_array, axis=1)
def check_aligned_region(self, left, right):
if right < left:
left, right = right, left
for region in self.region_array:
if (region[0] >= left) and (region[1] <= right):
return True
else:
continue
return False
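# Hedged example (comment only, added for illustration): with a region_array
# of [[100, 200]], check_aligned_region(50, 250) returns True because the
# target region lies entirely inside the queried span, while
# check_aligned_region(150, 250) returns False.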
class Bwa(object):
"""BWA"""
def __init__(self, target):
self.target = target
self.db_handle = ''
def build_index(self, destination, output=None):
self.db_handle = destination + '/temp_bwaIndex'
#os.system("bwa index -p {0} {1}".format(self.db_handle, self.target))
cmd = "bwa index -p {0} {1}".format(self.db_handle, self.target)
if output is None:
output = open(os.devnull, 'w')
else:
output = open(output, 'w')
try:
subprocess.check_call(cmd.split(), stdout=output, stderr=output)
output.close()
return True
except subprocess.CalledProcessError:
output.close()
return False
@staticmethod
def suffixes():
return [".amb", ".ann", ".bwt", ".pac", ".sa"]
@staticmethod
def align(bwa_index, query, output_sam_path, outerr=None):
for suff in Bwa.suffixes():
assert os.path.exists(bwa_index + suff),\
"[Bwa::align] Didn't find index files {}".format(bwa_index + suff)
assert os.path.exists(query), "[Bwa::align] Didn't find query file {}".format(query)
cmd = "bwa mem -x ont2d {idx} {query}".format(idx=bwa_index, query=query)
if outerr is None:
outerr = open(os.devnull, 'w')
else:
outerr = open(outerr, 'w')
try:
with open(output_sam_path, 'w') as fH:
fH.write(subprocess.check_output(cmd.split(), stderr=outerr))
outerr.close()
return True
except subprocess.CalledProcessError:
outerr.close()
return False
class NanoporeRead(object):
def __init__(self, fast_five_file, twoD=False):
# load the fast5
self.filename = fast_five_file
self.is_open = self.open()
self.read_label = ""
self.alignment_table_sequence = "" # the sequence made by assembling the alignment table
self.template_events = [] # template event sequence
self.complement_events = [] # complement event sequence
self.template_read = "" # template strand fastq sequence
self.complement_read = "" # complement strand fastq sequence
self.template_strand_event_map = [] # map of events to kmers in the 1D template read
self.complement_strand_event_map = [] # map of events to kmers in the 1D complement read
self.template_event_map = [] # map of template events to kmers in 2D read
self.complement_event_map = [] # map of complement events to kmers in 2D read
self.stay_prob = 0
self.template_model_name = ""
self.complement_model_name = ""
self.template_scale = 1
self.template_shift = 1
self.template_drift = 0
self.template_var = 1
self.template_scale_sd = 1
self.template_var_sd = 1
self.complement_scale = 1
self.complement_shift = 1
self.complement_drift = 0
self.complement_var = 1
self.complement_scale_sd = 1
self.complement_var_sd = 1
self.twoD = twoD
if self.twoD:
self.initialize_twoD()
else:
self.initialize()
def open(self):
try:
self.fastFive = h5py.File(self.filename, 'r')
return True
except Exception as e:
# do not call self.close() here; the h5py handle was never created
print("Error opening file {filename}: {error}".format(filename=self.filename, error=e), file=sys.stderr)
return False
def get_latest_basecall_edition(self, address):
highest = 0
while highest < 10:
if address.format(highest) in self.fastFive:
highest += 1
continue
else:
return highest - 1
return highest - 1  # all ten editions exist; return the last valid index
def initialize(self):
# TODO add try/except or something here to check for files that haven't been base-called
highest_1d_basecall = self.get_latest_basecall_edition("/Analyses/Basecall_1D_00{}")
oneD_root_address = "/Analyses/Basecall_1D_00{}".format(highest_1d_basecall)
self.version = self.fastFive[oneD_root_address].attrs["dragonet version"]
assert(self.version == "1.23.0"), "Unsupported version {}".format(self.version)
self.template_event_table_address = oneD_root_address + '/BaseCalled_template/Events'
self.template_model_address = oneD_root_address + "/BaseCalled_template/Model"
self.template_model_id = self.get_model_id(oneD_root_address + "/Summary/basecall_1d_template")
self.template_read = self.fastFive[oneD_root_address + "/BaseCalled_template/Fastq"][()].split()[2]
self.read_label = self.fastFive[oneD_root_address + "/BaseCalled_template/Fastq"][()].split()[0][1:]
self.kmer_length = len(self.fastFive[self.template_event_table_address][0][4])
return
def initialize_twoD(self, get_sequence=False):
self.has2D = False
self.has2D_alignment_table = False
highest_2d_basecall = self.get_latest_basecall_edition("/Analyses/Basecall_2D_00{}")
twoD_address = "/Analyses/Basecall_2D_00{}".format(highest_2d_basecall)
assert(twoD_address in self.fastFive), "[NanoporeRead::initialize_twoD] Didn't find two D address"
self.version = self.fastFive[twoD_address].attrs["dragonet version"]
supported_versions = ["1.15.0", "1.19.0", "1.20.0", "1.22.2", "1.22.4", "1.23.0"]
if self.version not in supported_versions:
print("[NanoporeRead::initialize_twoD]Unsupported Version {} (1.15.0, 1.19.0, 1.20.0, "
"1.22.2, 1.22.4, 1.23.0 supported)".format(self.version), file=sys.stdout)
self.close()
return False
if self.version == "1.15.0":
oneD_address = "/Analyses/Basecall_2D_00{}".format(highest_2d_basecall)
else:
highest_1d_basecall = self.get_latest_basecall_edition("/Analyses/Basecall_1D_00{}")
oneD_address = "/Analyses/Basecall_1D_00{}".format(highest_1d_basecall)
twoD_alignment_table_address = twoD_address + "/BaseCalled_2D/Alignment"
if twoD_alignment_table_address in self.fastFive:
self.twoD_alignment_table = self.fastFive[twoD_alignment_table_address]
if len(self.twoD_alignment_table) > 0:
self.has2D_alignment_table = True
self.kmer_length = len(self.twoD_alignment_table[0][2])
if get_sequence is True:
twoD_read_sequence_address = twoD_address + "/BaseCalled_2D/Fastq"
if twoD_read_sequence_address in self.fastFive:
self.has2D = True
self.twoD_read_sequence = self.fastFive[twoD_read_sequence_address][()].split()[2]
self.twoD_id = self.fastFive[twoD_read_sequence_address][()].split()[0:2][0][1:]
# initialize version-specific paths
if self.version == "1.15.0":
self.template_event_table_address = twoD_address + '/BaseCalled_template/Events'
self.template_model_address = twoD_address + "/BaseCalled_template/Model"
self.template_model_id = self.get_model_id(twoD_address + "/Summary/basecall_1d_template")
self.template_read = self.fastFive[twoD_address + "/BaseCalled_template/Fastq"][()].split()[2]
self.complement_event_table_address = twoD_address + '/BaseCalled_complement/Events'
self.complement_model_address = twoD_address + "/BaseCalled_complement/Model"
self.complement_model_id = self.get_model_id(twoD_address + "/Summary/basecall_1d_complement")
self.complement_read = self.fastFive[twoD_address + "/BaseCalled_complement/Fastq"][()].split()[2]
return True
elif self.version == "1.19.0" or self.version == "1.20.0":
self.template_event_table_address = oneD_address + '/BaseCalled_template/Events'
self.template_model_address = oneD_address + "/BaseCalled_template/Model"
self.template_model_id = self.get_model_id(oneD_address + "/Summary/basecall_1d_template")
self.template_read = self.fastFive[oneD_address + "/BaseCalled_template/Fastq"][()].split()[2]
self.complement_event_table_address = oneD_address + '/BaseCalled_complement/Events'
self.complement_model_address = oneD_address + "/BaseCalled_complement/Model"
self.complement_model_id = self.get_model_id(oneD_address + "/Summary/basecall_1d_complement")
self.complement_read = self.fastFive[oneD_address + "/BaseCalled_complement/Fastq"][()].split()[2]
return True
elif self.version == "1.22.2" or self.version == "1.22.4" or self.version == "1.23.0":
self.template_event_table_address = oneD_address + '/BaseCalled_template/Events'
self.template_model_address = ""
self.template_model_id = None
self.template_read = self.fastFive[oneD_address + "/BaseCalled_template/Fastq"][()].split()[2]
self.complement_event_table_address = oneD_address + '/BaseCalled_complement/Events'
self.complement_model_address = ""
self.complement_model_id = None
self.complement_read = self.fastFive[oneD_address + "/BaseCalled_complement/Fastq"][()].split()[2]
return True
else:
print("Unsupported Version (1.15.0, 1.19.0, 1.20.0, 1.22.2, 1.22.4 supported)", file=sys.stdout)
return False
def assemble_2d_sequence_from_table(self):
"""The 2D read sequence contains kmers that may not map to a template or complement event, which can make
mapping difficult downstream. This function makes a sequence from the 2D alignment table, which is usually
pretty similar to the 2D read, except it is guaranteed to have an event map to every position.
returns: sequence made from alignment table
"""
def find_kmer_overlap(k_i, k_j):
""" finds the overlap between two non-identical kmers.
k_i: one kmer
k_j: another kmer
returns: the shift between the kmers, i.e. the number of new bases k_j contributes
"""
for i in xrange(1, len(k_i)):
sk_i = k_i[i:]
sk_j = k_j[:-i]
if sk_i == sk_j:
return i
return len(k_i)
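# Hedged example (comment only, added for illustration): for 6-mers
# p_kmer = "ACGTAC" and kmer = "CGTACG", the suffix/prefix match is found at
# i = 1, so one new base (kmer[-1:] == "G") is appended to the sequence.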
self.alignment_table_sequence = ''
self.alignment_table_sequence = self.twoD_alignment_table[0][2]
p_kmer = self.twoD_alignment_table[0][2]
# iterate through the 6-mers in the alignment table
for t, c, kmer in self.twoD_alignment_table:
# if we're at a new 6-mer
if kmer != p_kmer:
# find overlap, could move up to len(6-mer) - 1 bases
i = find_kmer_overlap(p_kmer, kmer)
# append the suffix of the new 6-mer to the sequence
self.alignment_table_sequence += kmer[-i:]
# update
p_kmer = kmer
else:
continue
return
def init_1d_event_maps(self):
"""Maps the events from the template and complement strands to their base called kmers the map
generated by this function is called the "strand_event_map" because it only works for mapping the
strand read (1D read) to to it's events. Uses the same fields as 'get_twoD_event_map' below.
"""
def make_map(events):
event_map = [0]
previous_prob = 0
for i, line in islice(enumerate(events), 1, None):
move = line['move']
this_prob = line['p_model_state']
if move == 1:
event_map.append(i)
if move > 1:
for skip in xrange(move - 1):
event_map.append(i - 1)
event_map.append(i)
if move == 0:
if this_prob > previous_prob:
event_map[-1] = i
previous_prob = this_prob
final_event_index = [event_map[-1]]
padding = final_event_index * (self.kmer_length - 1)
event_map = event_map + padding
return event_map
self.template_strand_event_map = make_map(self.template_events)
assert len(self.template_strand_event_map) == len(self.template_read)
if self.twoD:
self.complement_strand_event_map = make_map(self.complement_events)
assert len(self.complement_strand_event_map) == len(self.complement_read)
return True
def get_twoD_event_map(self):
"""Maps the kmers in the alignment table sequence read to events in the template and complement strand reads
"""
# initialize
alignment_row = 0
prev_alignment_kmer = ''
nb_template_gaps = 0
previous_complement_event = None
previous_template_event = None
#twoD_init = self.initialize_twoD()
#if twoD_init is False:
# return False
if not self.has2D_alignment_table:
print("{file} doesn't have 2D alignment table".format(file=self.filename))
return False
self.assemble_2d_sequence_from_table()
# go through the kmers in the read sequence and match up the events
for i, seq_kmer in enumerate(kmer_iterator(self.alignment_table_sequence, self.kmer_length)):
# assign the current row's kmer
current_alignment_kmer = self.twoD_alignment_table[alignment_row][2]
# in the situation where there is a repeat kmer in the alignment then
# we want to pick the best event to kmer alignment, TODO implement this
# right now we just use the first alignment
while current_alignment_kmer == prev_alignment_kmer:
alignment_row += 1
current_alignment_kmer = self.twoD_alignment_table[alignment_row][2]
# a match
if seq_kmer == current_alignment_kmer:
template_event = self.twoD_alignment_table[alignment_row][0]
complement_event = self.twoD_alignment_table[alignment_row][1]
# handle template event
# if there is a gap, count it and don't add anything to the map
if template_event == -1:
nb_template_gaps += 1
# if there is an aligned event
if template_event != -1:
# if it is an aligned event and there are no gaps, add it to the map
if nb_template_gaps == 0:
self.template_event_map.append(template_event)
# update
previous_template_event = template_event
# if there were gaps in the alignment we have to add 'best guess'
# event alignments to the map which is the current aligned event
if nb_template_gaps > 0:
self.template_event_map += [template_event] * (nb_template_gaps + 1)
# reset template gaps
nb_template_gaps = 0
# update
previous_template_event = template_event
# handle complement event
# if there is a gap, add the last aligned complement event to the map
if complement_event == -1:
self.complement_event_map.append(previous_complement_event)
# if there is an aligned complement event add it to the map
if complement_event != -1:
self.complement_event_map.append(complement_event)
# update the most recent aligned complement event
previous_complement_event = complement_event
# update previous alignment kmer and increment alignment row
prev_alignment_kmer = current_alignment_kmer
alignment_row += 1
continue
# not a match, meaning that this kmer in the read sequence is not
# in the event alignment but we need to assign an event to it so
# we use the heuristic that we use the alignment of the most
# recent aligned events to this base
if seq_kmer != current_alignment_kmer:
self.template_event_map.append(previous_template_event)
self.complement_event_map.append(previous_complement_event)
continue
# fill in the final events for the partial last kmer
for _ in xrange(self.kmer_length - 1):
self.template_event_map += [previous_template_event] * (nb_template_gaps + 1)
self.complement_event_map.append(previous_complement_event)
nb_template_gaps = 0
# check that we have mapped all of the bases in the 2D read
assert(len(self.template_event_map) == len(self.alignment_table_sequence))
assert(len(self.complement_event_map) == len(self.alignment_table_sequence))
return True
def adjust_events_for_drift(self, events, drift):
"""Adjust event means by drift
"""
if events is None or drift is None:
return False
# transform events by time
# events have format [[mean], [start_time], [std_dev], [length]]
# get the start time of the first event
start_time = events[0][1]
for event in events:
# time since first event
delta_time = event[1] - start_time
# drift adjust
# TODO change adjustment here
event[0] -= (delta_time * drift)
return True
def get_template_events(self):
if self.template_event_table_address in self.fastFive:
self.template_events = self.fastFive[self.template_event_table_address]
return True
if self.template_event_table_address not in self.fastFive:
return False
def get_complement_events(self):
if self.complement_event_table_address in self.fastFive:
self.complement_events = self.fastFive[self.complement_event_table_address]
return True
if self.complement_event_table_address not in self.fastFive:
return False
def get_template_model_adjustments(self):
if self.template_model_address in self.fastFive:
self.has_template_model = True
self.template_scale = self.fastFive[self.template_model_address].attrs["scale"]
self.template_shift = self.fastFive[self.template_model_address].attrs["shift"]
self.template_drift = self.fastFive[self.template_model_address].attrs["drift"]
self.template_var = self.fastFive[self.template_model_address].attrs["var"]
self.template_scale_sd = self.fastFive[self.template_model_address].attrs["scale_sd"]
self.template_var_sd = self.fastFive[self.template_model_address].attrs["var_sd"]
if self.template_model_address not in self.fastFive:
self.has_template_model = False
return
def get_complement_model_adjustments(self):
if self.complement_model_address in self.fastFive:
self.has_complement_model = True
self.complement_scale = self.fastFive[self.complement_model_address].attrs["scale"]
self.complement_shift = self.fastFive[self.complement_model_address].attrs["shift"]
self.complement_drift = self.fastFive[self.complement_model_address].attrs["drift"]
self.complement_var = self.fastFive[self.complement_model_address].attrs["var"]
self.complement_scale_sd = self.fastFive[self.complement_model_address].attrs["scale_sd"]
self.complement_var_sd = self.fastFive[self.complement_model_address].attrs["var_sd"]
if self.complement_model_address not in self.fastFive:
self.has_complement_model = False
return
@staticmethod
def calculate_lambda(noise_mean, noise_stdev):
return (np.power(noise_mean, 3)) / (np.power(noise_stdev, 2))
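# Hedged note (comment only, added for illustration): this is the
# method-of-moments lambda of an inverse-Gaussian noise model, e.g.
# calculate_lambda(2.0, 1.0) == 8.0.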
def export_model(self, skip_bins, model_address, destination):
"""Exports the model to a file. Format:
line 1: [correlation coefficient] [level_mean] [level_sd] [noise_mean]
[noise_sd] [noise_lambda ] (.../kmer) \n
line 2: skip bins \n
line 3: [correlation coefficient] [level_mean] [level_sd, scaled]
[noise_mean] [noise_sd] [noise_lambda ] (.../kmer) \n
"""
assert self.is_open, "ERROR: Fast5 file is not open"
lambdas = []
if model_address in self.fastFive:
model = self.fastFive[model_address]
# line 1
print("0", end=' ', file=destination) # placeholder for correlation parameter
for kmer, level_mean, level_sd, noise_mean, noise_sd, weight in model:
lam = self.calculate_lambda(noise_mean, noise_sd)
lambdas.append(lam)
print(level_mean, level_sd, noise_mean, noise_sd, lam, end=' ', file=destination)
print("", end="\n", file=destination)
# line 2
for p in skip_bins:
print(p, end=' ', file=destination)
print("", end="\n", file=destination)
# line 3
print("0", end=' ', file=destination) # placeholder for correlation parameter
i = 0
for kmer, level_mean, level_sd, noise_mean, noise_sd, weight in model:
lam = lambdas[i]
print(level_mean, (level_sd * 1.75), noise_mean, noise_sd, lam, end=' ', file=destination)
i += 1
print("", end="\n", file=destination)
return True
else:
return False
def export_template_model(self, destination):
# for conditional HMM (as per JTS)
t_skip_prob_bins = [0.487, 0.412, 0.311, 0.229, 0.174, 0.134, 0.115, 0.103, 0.096, 0.092,
0.088, 0.087, 0.084, 0.085, 0.083, 0.082, 0.085, 0.083, 0.084, 0.082,
0.080, 0.085, 0.088, 0.086, 0.087, 0.089, 0.085, 0.090, 0.087, 0.096]
got_model = self.export_model(t_skip_prob_bins, self.template_model_address, destination)
return got_model
def export_complement_model(self, destination):
c_skip_prob_bins = [0.531, 0.478, 0.405, 0.327, 0.257, 0.207, 0.172, 0.154, 0.138, 0.132,
0.127, 0.123, 0.117, 0.115, 0.113, 0.113, 0.115, 0.109, 0.109, 0.107,
0.104, 0.105, 0.108, 0.106, 0.111, 0.114, 0.118, 0.119, 0.110, 0.119]
got_model = self.export_model(c_skip_prob_bins, self.complement_model_address, destination)
return got_model
def get_model_id(self, address):
if address in self.fastFive:
model_name = self.fastFive[address].attrs["model_file"]
model_name = model_name.split('/')[-1]
return model_name
else:
return None
def write_npRead(self, out_file):
if self.is_open is False:
print("[SignalAlign:write_npRead]: problem opeining file {filename}"
"".format(filename=self.filename), file=sys.stderr)
self.close()
return False
if self.twoD:
twoD_map_check = self.get_twoD_event_map()
complement_events_check = self.get_complement_events()
else:
twoD_map_check = True
complement_events_check = True
template_events_check = self.get_template_events()
oneD_event_map_check = self.init_1d_event_maps()
proceed = False not in [twoD_map_check, template_events_check, complement_events_check, oneD_event_map_check]
if proceed:
# get model params
self.get_template_model_adjustments()
if self.twoD:
self.get_complement_model_adjustments()
# transform events
# drift adjustment happens within signalMachine now
#if self.version in ["1.15.0", "1.19.0"]:
# t_transformed = self.adjust_events_for_drift(self.template_events, self.template_drift)
# c_transformed = self.adjust_events_for_drift(self.complement_events, self.complement_drift)
# check if that worked
# if t_transformed is False or c_transformed is False:
# return False
# Make the npRead
# line 1 parameters
print(len(self.alignment_table_sequence), end=' ', file=out_file) # 0alignment read length
print(len(self.template_events), end=' ', file=out_file) # 1nb of template events
print(len(self.complement_events), end=' ', file=out_file) # 2nb of complement events
print(len(self.template_read), end=' ', file=out_file) # 3length of template read
print(len(self.complement_read), end=' ', file=out_file) # 4length of complement read
print(self.template_scale, end=' ', file=out_file) # 5template scale
print(self.template_shift, end=' ', file=out_file) # 6template shift
print(self.template_var, end=' ', file=out_file) # 7template var
print(self.template_scale_sd, end=' ', file=out_file) # 8template scale_sd
print(self.template_var_sd, end=' ', file=out_file) # 9template var_sd
print(self.template_drift, end=' ', file=out_file) # 0template_drift
print(self.complement_scale, end=' ', file=out_file) # 1complement scale
print(self.complement_shift, end=' ', file=out_file) # 2complement shift
print(self.complement_var, end=' ', file=out_file) # 3complement var
print(self.complement_scale_sd, end=' ', file=out_file) # 4complement scale_sd
print(self.complement_var_sd, end=' ', file=out_file) # 5complement var_sd
print(self.complement_drift, end=' ', file=out_file) # 6complement_drift
print((1 if self.twoD else 0), end='\n', file=out_file) # has 2D
# line 2 alignment table sequence
print(self.alignment_table_sequence, end='\n', file=out_file)
# line 3 template read
print(self.template_read, end='\n', file=out_file)
# line 4 template strand map
for _ in self.template_strand_event_map:
print(_, end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 5 complement read
print(self.complement_read, end='\n', file=out_file)
# line 6 complement strand map
for _ in self.complement_strand_event_map:
print(_, end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 7 template 2D event map
for _ in self.template_event_map:
print(_, end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 8 template events
template_start_time = self.template_events[0]['start']
for mean, stdev, length, start in self.template_events['mean', 'stdv', 'length', 'start']:
print(mean, stdev, length, (start - template_start_time), sep=' ', end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 9 complement 2D event map
for _ in self.complement_event_map[::-1]:
print(_, end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 10 complement events
if self.twoD:
complement_start_time = self.complement_events[0]['start']
for mean, stdev, length, start in self.complement_events['mean', 'stdv', 'length', 'start']:
print(mean, stdev, length, (start - complement_start_time), sep=' ', end=' ', file=out_file)
else:
pass
print("", end="\n", file=out_file)
# line 11 model_state (template)
for _ in self.template_events['model_state']:
print(_, sep=' ', end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 12 p(model) (template)
for _ in self.template_events['p_model_state']:
print(_, sep=' ', end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 13 model_state (complement)
if self.twoD:
for _ in self.complement_events['model_state']:
print(_, sep=' ', end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 14 p(model) (complement)
if self.twoD:
for _ in self.complement_events['p_model_state']:
print(_, sep=' ', end=' ', file=out_file)
print("", end="\n", file=out_file)
return True
else:
print("write_npRead: proceed was False", file=sys.stderr)
return False
def close(self):
self.fastFive.close()
class SignalAlignment(object):
def __init__(self,
in_fast5,
reference_map,
path_to_EC_refs,
destination,
stateMachineType,
bwa_index,
in_templateHmm,
in_complementHmm,
in_templateHdp,
in_complementHdp,
threshold,
diagonal_expansion,
constraint_trim,
degenerate,
twoD_chemistry,
target_regions=None,
output_format="full"):
self.in_fast5 = in_fast5 # fast5 file to align
self.reference_map = reference_map # map with paths to reference sequences
self.path_to_EC_refs = path_to_EC_refs # place where the reference sequence with ambiguous characters is
self.destination = destination # place where the alignments go, should already exist
self.stateMachineType = stateMachineType # flag for signalMachine
self.bwa_index = bwa_index # index of reference sequence
self.threshold = threshold # min posterior probability to keep
self.diagonal_expansion = diagonal_expansion # alignment algorithm param
self.constraint_trim = constraint_trim # alignment algorithm param
self.target_regions = target_regions # only signal-align reads that map to these positions
self.output_format = output_format # smaller output files
self.degenerate = degenerate # set of nucleotides for degenerate characters
self.twoD_chemistry = twoD_chemistry # flag for 2D sequencing runs
# if we're using an input hmm, make sure it exists
if (in_templateHmm is not None) and os.path.isfile(in_templateHmm):
self.in_templateHmm = in_templateHmm
else:
self.in_templateHmm = None
if (in_complementHmm is not None) and os.path.isfile(in_complementHmm):
self.in_complementHmm = in_complementHmm
else:
self.in_complementHmm = None
# similarly for HDPs
if (in_templateHdp is not None) and os.path.isfile(in_templateHdp):
self.in_templateHdp = in_templateHdp
else:
self.in_templateHdp = None
if (in_complementHdp is not None) and os.path.isfile(in_complementHdp):
self.in_complementHdp = in_complementHdp
else:
self.in_complementHdp = None
def run(self, get_expectations=False):
print("[SignalAlign::run]Starting on {read}".format(read=self.in_fast5), file=sys.stderr)
if get_expectations:
assert self.in_templateHmm is not None and self.in_complementHmm is not None,\
"Need HMM files for model training"
# file checks
if os.path.isfile(self.in_fast5) is False:
print("signalAlign - did not find .fast5 at{file}".format(file=self.in_fast5))
return False
# containers and defaults
read_label = self.in_fast5.split("/")[-1] # used in the posteriors file as identifier
read_name = self.in_fast5.split("/")[-1][:-6] # get the name without the '.fast5'
# object for handling temporary files
temp_folder = FolderHandler()
temp_dir_path = temp_folder.open_folder(self.destination + "tempFiles_{readLabel}".format(readLabel=read_label))
# read-specific files, could be removed later but are kept right now to make it easier to rerun commands
temp_npRead = temp_folder.add_file_path("temp_{read}.npRead".format(read=read_label))
read_fasta = temp_folder.add_file_path("temp_seq_{read}.fa".format(read=read_label))
temp_samfile = temp_folder.add_file_path("temp_sam_file_{read}.sam".format(read=read_label))
cigar_file = temp_folder.add_file_path("temp_cigar_{read}.txt".format(read=read_label))
# make the npRead and fasta
if not self.twoD_chemistry:
ok, version, pop_1 = prepareOneD(fast5=self.in_fast5, npRead_path=temp_npRead, oneD_read_path=read_fasta)
else:
ok, version, pop1_complement = get_npRead_2dseq_and_models(fast5=self.in_fast5,
npRead_path=temp_npRead,
twod_read_path=read_fasta)
if not ok:
print("file {file} does not have is corrupt".format(file=read_label), file=sys.stderr)
temp_folder.remove_folder()
return False
# add an indicator for the model being used
if self.stateMachineType == "threeState":
model_label = ".sm"
stateMachineType_flag = ""
elif self.stateMachineType == "threeStateHdp":
model_label = ".sm3Hdp"
stateMachineType_flag = "--sm3Hdp "
if self.twoD_chemistry:
assert (self.in_templateHdp is not None) and (self.in_complementHdp is not None), "Need to provide HDPs"
else:
assert self.in_templateHdp is not None, "Need to provide Template HDP"
else: # make invalid stateMachine control?
model_label = ".sm"
stateMachineType_flag = ""
# get orientation and cigar from BWA; this serves as the guide alignment
cigar_string, strand, mapped_refernce = exonerated_bwa_pysam(bwa_index=self.bwa_index,
query=read_fasta,
temp_sam_path=temp_samfile,
target_regions=self.target_regions)
cig_handle = open(cigar_file, "w")
cig_handle.write(cigar_string + "\n")
cig_handle.close()
if mapped_refernce not in self.reference_map.keys():
if mapped_refernce is False:
print("[SignalAlignment::run]Read {read} didn't map"
"".format(read=read_label), file=sys.stderr)
else:
print("[SignalAlignment::run]Reference {ref} not found in contigs"
"{keys}".format(ref=mapped_refernce, keys=self.reference_map.keys()),
file=sys.stderr)
temp_folder.remove_folder()
return False
# this gives the format: /directory/for/files/file.model.orientation.tsv
posteriors_file_path = ''
# forward strand
if strand == "+":
if self.output_format == "full":
posteriors_file_path = self.destination + read_name + model_label + ".forward.tsv"
elif self.output_format == "variantCaller":
posteriors_file_path = self.destination + read_name + model_label + ".tsv"
else:
posteriors_file_path = self.destination + read_name + model_label + ".assignments"
# backward strand
if strand == "-":
if self.output_format == "full":
posteriors_file_path = self.destination + read_name + model_label + ".backward.tsv"
elif self.output_format == "variantCaller":
posteriors_file_path = self.destination + read_name + model_label + ".tsv"
else:
posteriors_file_path = self.destination + read_name + model_label + ".assignments"
# didn't map
elif (strand != "+") and (strand != "-"):
print("[SignalAlignment::run]- {read} gave unrecognizable strand flag: {flag}".format(read=read_label, flag=strand),
file=sys.stderr)
temp_folder.remove_folder()
return False
# Alignment/Expectations routine
# containers and defaults
path_to_signalAlign = "./signalMachine"
# flags
# input (match) models
if self.in_templateHmm is None:
self.in_templateHmm = default_template_model_from_version(version=version)
if self.twoD_chemistry:
if self.in_complementHmm is None:
self.in_complementHmm = default_complement_model_from_version(version=version,
pop1_complement=pop1_complement)
assert self.in_templateHmm is not None
if self.twoD_chemistry:
assert self.in_complementHmm is not None
template_model_flag = "-T {} ".format(self.in_templateHmm)
if self.twoD_chemistry:
complement_model_flag = "-C {} ".format(self.in_complementHmm)
else:
complement_model_flag = ""
print("signalAlign - NOTICE: template model {t} complement model {c}"
"".format(t=self.in_templateHmm, c=self.in_complementHmm), file=sys.stderr)
# reference sequences
assert self.reference_map[mapped_refernce]["forward"] is not None
assert self.reference_map[mapped_refernce]["backward"] is not None
forward_reference = self.reference_map[mapped_refernce]["forward"]
backward_reference = self.reference_map[mapped_refernce]["backward"]
assert os.path.isfile(forward_reference)
assert os.path.isfile(backward_reference)
forward_ref_flag = "-f {f_ref} ".format(f_ref=forward_reference)
backward_ref_flag = "-b {b_ref} ".format(b_ref=backward_reference)
# input HDPs
if (self.in_templateHdp is not None) or (self.in_complementHdp is not None):
hdp_flags = "-v {tHdp_loc} ".format(tHdp_loc=self.in_templateHdp)
if self.twoD_chemistry and self.in_complementHdp is not None:
hdp_flags += "-w {cHdp_loc} ".format(cHdp_loc=self.in_complementHdp)
else:
hdp_flags = ""
# threshold
if self.threshold is not None:
threshold_flag = "-D {threshold} ".format(threshold=self.threshold)
else:
threshold_flag = ""
# diagonal expansion
if self.diagonal_expansion is not None:
diag_expansion_flag = "-x {expansion} ".format(expansion=self.diagonal_expansion)
else:
diag_expansion_flag = ""
# constraint trim
if self.constraint_trim is not None:
trim_flag = "-m {trim} ".format(trim=self.constraint_trim)
else:
trim_flag = ""
        # NOTE: to turn off banded alignment, uncomment this flag; it just trims away all of the anchors
#trim_flag = "-m 9999"
# output format
fmts = {"full": 0, "variantCaller": 1, "assignments": 2}
if self.output_format not in fmts.keys():
temp_folder.remove_folder()
return False
out_fmt = "-s {fmt} ".format(fmt=fmts[self.output_format])
# degenerate nucleotide information
if self.degenerate is not None:
degenerate_flag = "-o {} ".format(self.degenerate)
else:
degenerate_flag = ""
if self.twoD_chemistry:
twoD_flag = "--twoD"
else:
twoD_flag = ""
# commands
if get_expectations:
template_expectations_file_path = self.destination + read_name + ".template.expectations"
complement_expectations_file_path = self.destination + read_name + ".complement.expectations"
command = \
"{vA} {td} {degen}{sparse}{model}{f_ref}{b_ref} -q {npRead} " \
"{t_model}{c_model}{thresh}{expansion}{trim} {hdp}-L {readLabel} -p {cigarFile} " \
"-t {templateExpectations} -c {complementExpectations}"\
.format(cigar=cigar_string, vA=path_to_signalAlign, model=stateMachineType_flag,
f_ref=forward_ref_flag, b_ref=backward_ref_flag, cigarFile=cigar_file,
npRead=temp_npRead, readLabel=read_label, td=twoD_flag,
templateExpectations=template_expectations_file_path, hdp=hdp_flags,
complementExpectations=complement_expectations_file_path, t_model=template_model_flag,
c_model=complement_model_flag, thresh=threshold_flag, expansion=diag_expansion_flag,
trim=trim_flag, degen=degenerate_flag, sparse=out_fmt)
else:
command = \
"{vA} {td} {degen}{sparse}{model}{f_ref}{b_ref} -q {npRead} " \
"{t_model}{c_model}{thresh}{expansion}{trim} -p {cigarFile} " \
"-u {posteriors} {hdp}-L {readLabel}"\
.format(cigar=cigar_string, vA=path_to_signalAlign, model=stateMachineType_flag, sparse=out_fmt,
f_ref=forward_ref_flag, b_ref=backward_ref_flag, cigarFile=cigar_file,
readLabel=read_label, npRead=temp_npRead, td=twoD_flag,
t_model=template_model_flag, c_model=complement_model_flag,
posteriors=posteriors_file_path, thresh=threshold_flag, expansion=diag_expansion_flag,
trim=trim_flag, hdp=hdp_flags, degen=degenerate_flag)
# run
print("signalAlign - running command: ", command, end="\n", file=sys.stderr)
os.system(command)
temp_folder.remove_folder()
return True
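        # For reference, an assembled signalMachine call (hypothetical paths,
        # single 1D read, "full" output, default threeState model) looks like:
        #   ./signalMachine  -s 0 -f fwd.fa -b bwd.fa -q temp_read.npRead
        #       -T template.model -p temp_cigar.txt -u read.sm.forward.tsv -L read_label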
class SignalHmm(object):
def __init__(self, model_type):
self.match_model_params = 5 # level_mean, level_sd, noise_mean, noise_sd, noise_lambda
self.model_type = model_type # ID of model type
self.state_number = {"threeState": 3, "threeStateHdp": 3}[model_type]
self.symbol_set_size = 0
self.transitions = np.zeros(self.state_number**2)
self.transitions_expectations = np.zeros(self.state_number**2)
self.likelihood = 0.0
self.running_likelihoods = []
self.alphabet_size = 0
self.alphabet = ""
self.kmer_length = 0
self.has_model = False
self.normalized = False
# event model for describing normal distributions for each kmer
self.event_model = {"means": np.zeros(self.symbol_set_size),
"SDs": np.zeros(self.symbol_set_size),
"noise_means": np.zeros(self.symbol_set_size),
"noise_SDs": np.zeros(self.symbol_set_size),
"noise_lambdas": np.zeros(self.symbol_set_size)}
def normalize_transitions_expectations(self):
# normalize transitions
for from_state in xrange(self.state_number):
i = self.state_number * from_state
j = sum(self.transitions_expectations[i:i+self.state_number])
for to_state in xrange(self.state_number):
self.transitions_expectations[i + to_state] = self.transitions_expectations[i + to_state] / j
def set_default_transitions(self):
MATCH_CONTINUE = np.exp(-0.23552123624314988) # stride
MATCH_FROM_GAP_X = np.exp(-0.21880828092192281) # 1 - skip'
MATCH_FROM_GAP_Y = np.exp(-0.013406326748077823) # 1 - (skip + stay)
GAP_OPEN_X = np.exp(-1.6269694202638481) # skip
GAP_OPEN_Y = np.exp(-4.3187242127300092) # 1 - (skip + stride)
GAP_EXTEND_X = np.exp(-1.6269694202638481) # skip'
GAP_EXTEND_Y = np.exp(-4.3187242127239411) # stay (1 - (skip + stay))
GAP_SWITCH_TO_X = 0.000000001
GAP_SWITCH_TO_Y = 0.0
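        # the flat list below is the row-major 3x3 transition matrix:
        #   rows = from {MATCH, GAP_X, GAP_Y}, columns = to {MATCH, GAP_X, GAP_Y}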
self.transitions = [
MATCH_CONTINUE, GAP_OPEN_X, GAP_OPEN_Y,
MATCH_FROM_GAP_X, GAP_EXTEND_X, GAP_SWITCH_TO_Y,
MATCH_FROM_GAP_Y, GAP_SWITCH_TO_X, GAP_EXTEND_Y
]
return
def check_header_line(self, line, expectations_file):
if len(line) != 4:
print("signalHmm.check_header_line - incorrect header (param line): {}".format(expectations_file), file=sys.stderr)
return False
if int(line[0]) != self.state_number:
print("signalHmm.check_header_line - state number error should be {exp} got {obs}"
"".format(exp=self.state_number, obs=line[0]), file=sys.stderr)
return False
if int(line[1]) != self.alphabet_size:
print("signalHmm.check_header_line - alphabet size error incorrect parameters: {file}, line {line}"
"".format(file=expectations_file, line=''.join(line)), file=sys.stderr)
return False
if line[2] != self.alphabet:
print("signalHmm.check_header_line - incorrect parameters: {file}, line {line}"
"".format(file=expectations_file, line=''.join(line)), file=sys.stderr)
return False
if int(line[3]) != self.kmer_length:
print("signalHmm.check_header_line - incorrect parameters: {file}, line {line}"
"".format(file=expectations_file, line=''.join(line)), file=sys.stderr)
return False
return True
def load_model(self, model_file):
# the model file has the format:
# line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
# line 1: match->match \t match->gapX \t match->gapY \t
# gapX->match \t gapX->gapX \t gapX->gapY \t
# gapY->match \t gapY->gapX \t gapY->gapY \n
# line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
        assert os.path.exists(model_file), "signalHmm.load_model - didn't find model here: {}".format(model_file)
fH = open(model_file, 'r')
line = fH.readline().split()
# check for correct header length
        assert len(line) == 4, "signalHmm.load_model - incorrect header line length: {}".format(''.join(line))
# check stateNumber
assert int(line[0]) == self.state_number, "signalHmm.load_model - incorrect stateNumber got {got} should be {exp}" \
"".format(got=int(line[0]), exp=self.state_number)
# load model parameters
self.alphabet_size = int(line[1])
self.alphabet = line[2]
self.kmer_length = int(line[3])
self.symbol_set_size = self.alphabet_size**self.kmer_length
assert self.symbol_set_size > 0, "signalHmm.load_model - Got 0 for symbol_set_size"
assert self.symbol_set_size <= 6**6, "signalHmm.load_model - Got more than 6^6 for symbol_set_size got {}" \
"".format(self.symbol_set_size)
line = map(float, fH.readline().split())
assert len(line) == len(self.transitions) + 1, "signalHmm.load_model incorrect transitions line"
self.transitions = line[:-1]
self.likelihood = line[-1]
line = map(float, fH.readline().split())
assert len(line) == self.symbol_set_size * NB_MODEL_PARAMS, \
"signalHmm.load_model incorrect event model line"
self.event_model["means"] = line[::NB_MODEL_PARAMS]
self.event_model["SDs"] = line[1::NB_MODEL_PARAMS]
self.event_model["noise_means"] = line[2::NB_MODEL_PARAMS]
self.event_model["noise_SDs"] = line[3::NB_MODEL_PARAMS]
self.event_model["noise_lambdas"] = line[4::NB_MODEL_PARAMS]
assert not np.any(self.event_model["means"] == 0.0), "signalHmm.load_model, this model has 0 E_means"
assert not np.any(self.event_model["SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_means"
assert not np.any(self.event_model["noise_means"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_means"
assert not np.any(self.event_model["noise_SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_SDs"
self.has_model = True
def write(self, out_file):
# the model file has the format:
# line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
# line 1: match->match \t match->gapX \t match->gapY \t
# gapX->match \t gapX->gapX \t gapX->gapY \t
# gapY->match \t gapY->gapX \t gapY->gapY \n
# line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
assert self.has_model, "Shouldn't be writing down a Hmm that has no Model"
assert self.normalized, "Shouldn't be writing down a not normalized HMM"
f = open(out_file, 'w')
# line 0
f.write("{stateNumber}\t{alphabetSize}\t{alphabet}\t{kmerLength}\n"
"".format(stateNumber=self.state_number, alphabetSize=self.alphabet_size,
alphabet=self.alphabet, kmerLength=self.kmer_length))
# line 1 transitions
for i in xrange(self.state_number * self.state_number):
f.write("{transition}\t".format(transition=str(self.transitions[i])))
# likelihood
f.write("{}\n".format(str(self.likelihood)))
# line 2 Event Model
for k in xrange(self.symbol_set_size):
f.write("{level_mean}\t{level_sd}\t{noise_mean}\t{noise_sd}\t{noise_lambda}\t"
"".format(level_mean=self.event_model["means"][k], level_sd=self.event_model["SDs"][k],
noise_mean=self.event_model["noise_means"][k], noise_sd=self.event_model["noise_SDs"][k],
noise_lambda=self.event_model["noise_lambdas"][k]))
f.write("\n")
f.close()
class ContinuousPairHmm(SignalHmm):
def __init__(self, model_type):
super(ContinuousPairHmm, self).__init__(model_type=model_type)
self.set_default_transitions()
# bins for expectations
self.mean_expectations = np.zeros(self.symbol_set_size)
self.sd_expectations = np.zeros(self.symbol_set_size)
self.posteriors = np.zeros(self.symbol_set_size)
self.observed = np.zeros(self.symbol_set_size, dtype=bool)
self.has_model = False
self.normalized = False
def add_expectations_file(self, expectations_file):
# expectations files have the format:
# line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
# line 1: match->match \t match->gapX \t match->gapY \t
# gapX->match \t gapX->gapX \t gapX->gapY \t
# gapY->match \t gapY->gapX \t gapY->gapY \n
# line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
# line 3: event expectations [mean] [sd] / kmer \n
# line 4: posteriors 1 per kmer \n
# line 5: observed 1 per kmer \n
if not os.path.exists(expectations_file) or os.stat(expectations_file).st_size == 0:
print("Empty or missing file {}".format(expectations_file), file=sys.stderr)
return False
fH = open(expectations_file, 'r')
# line 0
line = fH.readline().split()
header_line_check = self.check_header_line(line=line, expectations_file=expectations_file)
if header_line_check is False:
fH.close()
return False
# line 1: transitions, likelihood
line = map(float, fH.readline().split())
# check if valid
if len(line) != (len(self.transitions) + 1):
print("cpHMM: check_file - bad file (transitions expectations): {}".format(expectations_file),
file=sys.stderr)
fH.close()
return False
self.likelihood += line[-1]
self.transitions_expectations = map(lambda x: sum(x), zip(self.transitions_expectations, line[0:-1]))
# line 2: event model
line = map(float, fH.readline().split())
if len(line) != self.symbol_set_size * NB_MODEL_PARAMS:
print("cpHMM: check_file - bad file (event model): {}".format(expectations_file), file=sys.stderr)
fH.close()
return False
# line 3 event expectations [E_mean, E_sd]
line = map(float, fH.readline().split())
if len(line) != self.symbol_set_size * NORM_DIST_PARAMS:
print("cpHMM: check_file - bad file (event expectations): {}".format(expectations_file), file=sys.stderr)
fH.close()
return False
self.mean_expectations = [i + j for i, j in izip(self.mean_expectations, line[::NORM_DIST_PARAMS])]
self.sd_expectations = [i + j for i, j in izip(self.sd_expectations, line[1::NORM_DIST_PARAMS])]
# line 4, posteriors
line = map(float, fH.readline().split())
if len(line) != self.symbol_set_size:
print("cpHMM: check_file - bad file (posteriors): {}".format(expectations_file), file=sys.stderr)
fH.close()
return False
self.posteriors = map(lambda x: sum(x), zip(self.posteriors, line))
        # convert via float first: bool("0") on the raw string would otherwise be True
        line = map(lambda x: bool(float(x)), fH.readline().split())
if len(line) != self.symbol_set_size:
print("cpHMM: check_file - bad file (observations): {}".format(expectations_file), file=sys.stderr)
fH.close()
return False
self.observed = [any(b) for b in zip(self.observed, line)]
fH.close()
return True
def normalize(self, update_transitions, update_emissions):
# normalize transitions expectations
self.normalize_transitions_expectations()
# update
if update_transitions is True:
for i in xrange(self.state_number**2):
self.transitions[i] = self.transitions_expectations[i]
# calculate the new expected mean and standard deviation for the kmer normal distributions
if update_emissions:
for k in xrange(self.symbol_set_size): # TODO implement learning rate
if self.observed[k] is True:
u_k = self.mean_expectations[k] / self.posteriors[k]
o_k = np.sqrt(self.sd_expectations[k] / self.posteriors[k])
if u_k > 0:
self.event_model["means"][k] = u_k
self.event_model["SDs"][k] = o_k
else:
continue
self.normalized = True
class HdpSignalHmm(SignalHmm):
def __init__(self, model_type, threshold):
super(HdpSignalHmm, self).__init__(model_type=model_type)
self.set_default_transitions()
self.threshold = threshold
self.kmer_assignments = []
self.event_assignments = []
self.assignments_record = []
def add_expectations_file(self, expectations_file):
# expectations files have the format:
# line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
# line 1: match->match \t match->gapX \t match->gapY \t
# gapX->match \t gapX->gapX \t gapX->gapY \t
# gapY->match \t gapY->gapX \t gapY->gapY \n
# line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
# line 3: event assignments
# line 4: kmer assignments
        if not os.path.exists(expectations_file) or os.stat(expectations_file).st_size == 0:
            print("Empty or missing file {}".format(expectations_file), file=sys.stderr)
            return False
fH = open(expectations_file, 'r')
# line 0
line = fH.readline().split()
header_line_check = self.check_header_line(line=line, expectations_file=expectations_file)
if header_line_check is False:
fH.close()
return False
# line 1: transitions, likelihood
line = map(float, fH.readline().split())
if len(line) != (len(self.transitions) + 1):
print("hdpHmm.add_expectations_file - problem with file {f} transitions line {l}, incorrect length"
"".format(f=expectations_file, l=''.join(line)), file=sys.stdout)
fH.close()
return False
self.likelihood += line[-1]
self.transitions_expectations = map(lambda x: sum(x), zip(self.transitions_expectations, line[0:-1]))
# line 2: event model
line = map(float, fH.readline().split())
if len(line) != self.symbol_set_size * NB_MODEL_PARAMS:
print("hdpHmm.add_expectations_file - problem with event model in file {}"
"".format(expectations_file), file=sys.stderr)
fH.close()
return False
# line 3: event assignments
line = map(float, fH.readline().split())
self.event_assignments += line
# line 4: kmer assignments
line = map(str, fH.readline().split())
self.kmer_assignments += line
fH.close()
return True
def reset_assignments(self):
self.assignments_record.append(len(self.event_assignments))
self.event_assignments = []
self.kmer_assignments = []
def normalize(self, update_transitions, update_emissions=None):
self.normalize_transitions_expectations()
if update_transitions is True:
for i in xrange(self.state_number**2):
self.transitions[i] = self.transitions_expectations[i]
self.normalized = True
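# Illustrative sketch (not part of the original module): load a model file
# written in the format documented above and report its dimensions. The
# default path is hypothetical.
def _example_inspect_model(model_file="template.model"):
    hmm = SignalHmm(model_type="threeState")
    hmm.load_model(model_file)
    print("alphabet={a} kmer_length={k} kmers={n}".format(
        a=hmm.alphabet, k=hmm.kmer_length, n=hmm.symbol_set_size), file=sys.stderr)
    return hmm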
|
mitenjain/signalAlign
|
scripts/signalAlignLib.py
|
Python
|
mit
| 77,140
|
[
"BWA",
"pysam"
] |
af7c56a02e977c424279f091ffd20714c21eda4023cd1decfba0e630f6840c51
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************************************
espressopp.interaction.LennardJonesExpand
*****************************************
.. math::
    V(r) = 4 \varepsilon \left[ \left( \frac{\sigma}{r - \Delta} \right)^{12} -
    \left( \frac{\sigma}{r - \Delta} \right)^{6} \right]
.. function:: espressopp.interaction.LennardJonesExpand(epsilon, sigma, delta, cutoff, shift)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param delta: (default: 0.0)
:param cutoff: (default: infinity)
:param shift: (default: "auto")
:type epsilon: real
:type sigma: real
:type delta: real
:type cutoff:
:type shift:
.. function:: espressopp.interaction.VerletListLennardJonesExpand(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListLennardJonesExpand.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListLennardJonesExpand.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListLennardJonesExpand(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListLennardJonesExpand.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesExpand(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesExpand.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_LennardJonesExpand, \
interaction_VerletListLennardJonesExpand, \
interaction_CellListLennardJonesExpand, \
interaction_FixedPairListLennardJonesExpand
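# Reference evaluation of the shifted potential documented above (illustrative
# only; the production potential is computed in C++ by
# interaction_LennardJonesExpand).
def _lj_expand_energy(r, epsilon=1.0, sigma=1.0, delta=0.0):
    """Return V(r) = 4*eps*[(sigma/(r-delta))**12 - (sigma/(r-delta))**6]."""
    frac = sigma / (r - delta)
    return 4.0 * epsilon * (frac**12 - frac**6)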
class LennardJonesExpandLocal(PotentialLocal, interaction_LennardJonesExpand):
def __init__(self, epsilon=1.0, sigma=1.0, delta=0.0,
cutoff=infinity, shift="auto"):
"""Initialize the local LennardJonesExpand object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift =="auto":
cxxinit(self, interaction_LennardJonesExpand,
epsilon, sigma, delta, cutoff)
else:
cxxinit(self, interaction_LennardJonesExpand,
epsilon, sigma, delta, cutoff, shift)
class VerletListLennardJonesExpandLocal(InteractionLocal, interaction_VerletListLennardJonesExpand):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListLennardJonesExpand, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
class CellListLennardJonesExpandLocal(InteractionLocal, interaction_CellListLennardJonesExpand):
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListLennardJonesExpand, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLennardJonesExpandLocal(InteractionLocal, interaction_FixedPairListLennardJonesExpand):
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListLennardJonesExpand, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
if pmi.isController:
class LennardJonesExpand(Potential):
'The LennardJonesExpand potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.LennardJonesExpandLocal',
pmiproperty = ['epsilon', 'sigma', 'delta']
)
class VerletListLennardJonesExpand(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListLennardJonesExpandLocal',
pmicall = ['setPotential','getPotential']
)
class CellListLennardJonesExpand(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.CellListLennardJonesExpandLocal',
pmicall = ['setPotential']
)
class FixedPairListLennardJonesExpand(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListLennardJonesExpandLocal',
pmicall = ['setPotential']
)
|
kkreis/espressopp
|
src/interaction/LennardJonesExpand.py
|
Python
|
gpl-3.0
| 6,647
|
[
"ESPResSo"
] |
82febc32f6b51f7a48eb6cde60b705788fbd5ec034b3af668def0f1a5be527a5
|
#! /usr/bin/python
import numpy as np
import cv2
import time
import sys
import os
from debug_utils import *
show_imgs = False
def grayscale(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Or use RGB2GRAY if you read an image with mpimg
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def x_thresh(img, sobel_kernel=3, thresh=(0, 255)):
gray = grayscale(img)
# Take only Sobel x
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
# Calculate the absolute value of the x derivative:
abs_sobelx = np.absolute(sobelx)
# Convert the absolute value image to 8-bit:
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
# Create binary image using thresholding
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return sxbinary
def mag_thresh(img, sobel_kernel=3, thresh=(0, 255)):
# Convert to grayscale
gray = grayscale(img)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx ** 2 + sobely ** 2)
# Rescale to 8 bit
scale_factor = np.max(gradmag) / 255
gradmag = (gradmag / scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= thresh[0]) & (gradmag <= thresh[1])] = 1
# Return the binary image
return binary_output
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):
gray = grayscale(img)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
dir_grad = np.arctan2(abs_sobely, abs_sobelx)
binary_output = np.zeros_like(dir_grad)
binary_output[(dir_grad >= thresh[0]) & (dir_grad <= thresh[1])] = 1
return binary_output
def hsv_select(img, thresh_low, thresh_high):
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
color_binary = np.zeros((img.shape[0], img.shape[1]))
color_binary[(hsv[:, :, 0] >= thresh_low[0]) & (hsv[:, :, 0] <= thresh_high[0])
& (hsv[:, :, 1] >= thresh_low[1]) & (hsv[:, :, 1] <= thresh_high[1])
& (hsv[:, :, 2] >= thresh_low[2]) & (hsv[:, :, 2] <= thresh_high[2])] = 1
return color_binary
def hls_select(img, thresh=(0, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s = hls[:, :, 2]
s_binary = np.zeros_like(s)
s_binary[(s > thresh[0]) & (s <= thresh[1])] = 1
return s_binary
def warp(img):
img_size = (img.shape[1], img.shape[0])
src = np.float32([[130, 310], [231, 172], [431, 173], [563, 309]])
square_size = 100
img_x_half = 320
img_y_half = 240
y_offset = 150
dst = np.float32([[-square_size/2 + img_x_half, square_size/2 + img_y_half + y_offset],
[-square_size/2 + img_x_half, -square_size/2 + img_y_half + y_offset],
[square_size/2 + img_x_half, -square_size/2 + img_y_half + y_offset],
[square_size/2 + img_x_half, square_size/2 + img_y_half + y_offset]])
M = cv2.getPerspectiveTransform(src, dst)
# inverse
Minv = cv2.getPerspectiveTransform(dst, src)
# create a warped image
warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
# unpersp = cv2.warpPerspective(warped, Minv, img_size, flags=cv2.INTER_LINEAR)
unpersp = img
    return warped, unpersp, Minv  # return the inverse so callers can project back to camera view
# Function for saving images to an output folder
def create_pathname(infile, ext):
temp1 = os.path.split(infile)[-1]
temp2 = os.path.splitext(temp1)[0] + ext
outfile = os.path.join("output1/", temp2)
return outfile
# Functions for drawing lines
def fit_lines(img):
# import matplotlib.pyplot as plt
# plt.imshow(img)
# plt.show()
binary_warped = img.copy()
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
# Make this more robust
midpoint = np.int(histogram.shape[0] / 4) # lanes aren't always centered in the image
leftx_base = np.argmax(histogram[150:midpoint]) + 150 # Left lane shouldn't be searched from zero
rightx_base = np.argmax(histogram[midpoint: midpoint + 500]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0] / nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 80
# Set minimum number of pixels found to recenter window
minpix = 70
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (
nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (
nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
return left_fit, right_fit, out_img
# Define a function for creating lane lines
def lane_detector(image, video_mode=False):
# Undistort image
undist = image
# Define a kernel size and apply Gaussian smoothing
apply_blur = True
if apply_blur:
kernel_size = 5
undist = gaussian_blur(undist, kernel_size)
# Define parameters for gradient thresholding
sxbinary = x_thresh(undist, sobel_kernel=3, thresh=(22, 100))
mag_binary = mag_thresh(undist, sobel_kernel=3, thresh=(40, 100))
dir_binary = dir_threshold(undist, sobel_kernel=15, thresh=(0.7, 1.3))
# Define parameters for color thresholding
    s_binary = hls_select(undist, thresh=(90, 255))
    s_binary = s_binary == 0  # invert the saturation mask: keep low-saturation pixels
# You can combine various thresholding operations
# Stack each channel to view their individual contributions in green and blue respectively
# This returns a stack of the two binary images, whose components you can see as different colors
color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary))
# Combine the two binary thresholds
combined_binary1 = np.zeros_like(sxbinary)
# combined_binary1[(s_binary == 1) | (sxbinary == 1)] = 1
combined_binary1[(s_binary == 1)] = 1
combined_binary2 = np.zeros_like(sxbinary)
combined_binary2[(s_binary == 1) | (sxbinary == 1) | (mag_binary == 1)] = 1
# Apply perspective transform
# Define points
warped_im, _, Minv = warp(combined_binary1)
return undist, sxbinary, s_binary, combined_binary1, warped_im, Minv
# Calculate Curvature
def curvature(left_fit, right_fit, binary_warped, print_data=True):
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
ym_per_pix = 30.0 / 720 # meters per pixel in y dimension
xm_per_pix = 3.7 / 700 # meters per pixel in x dimension
# Define left and right lanes in pixels
leftx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
rightx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# Identify new coefficients in metres
left_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0])
right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * right_fit_cr[0])
# Calculation of center
# left_lane and right lane bottom in pixels
    left_lane_bottom = left_fit[0] * y_eval ** 2 + left_fit[1] * y_eval + left_fit[2]
    right_lane_bottom = right_fit[0] * y_eval ** 2 + right_fit[1] * y_eval + right_fit[2]
# Lane center as mid of left and right lane bottom
lane_center = (left_lane_bottom + right_lane_bottom) / 2.
center_image = 640
center = (lane_center - center_image) * xm_per_pix # Convert to meters
if print_data == True:
# Now our radius of curvature is in meters
print(left_curverad, 'm', right_curverad, 'm', center, 'm')
return left_curverad, right_curverad, center
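# Illustrative sanity check (not part of the original pipeline): made-up fit
# coefficients for two gently curved lanes roughly 700 px apart; curvature()
# should report similarly large radii for both lanes and a small centre offset.
def _curvature_sanity_check():
    fake_warped = np.zeros((720, 1280), dtype=np.uint8)
    left_fit = np.array([1e-4, -0.10, 320.0])
    right_fit = np.array([1e-4, -0.10, 1020.0])
    return curvature(left_fit, right_fit, fake_warped, print_data=False)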
def draw_lines(undist, warped, left_fit, right_fit, left_cur, right_cur, center, Minv, show_img=True):
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])
# Fit new polynomials to x,y in world space
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (color_warp.shape[1], color_warp.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
return result
def process_img(image):
undist, sxbinary, s_binary, combined_binary1, warped_im, Minv = lane_detector(image)
imshow(undist, 'undist')
imshow(sxbinary*255, 'sxbinary')
imshow(s_binary*255, 's_binary')
imshow(combined_binary1*255, 'combined_binary1')
imshow(warped_im*255, 'warped_im')
left_fit, right_fit, out_img = fit_lines(warped_im)
print(left_fit, right_fit)
left_cur, right_cur, center = curvature(left_fit, right_fit, warped_im, print_data=True)
result = draw_lines(undist, warped_im, left_fit, right_fit, left_cur, right_cur, center, Minv, show_img=False)
if show_imgs:
imshow(undist, 'undist')
imshow(sxbinary*255, 'sxbinary')
imshow(s_binary*255, 's_binary')
imshow(combined_binary1*255, 'combined_binary1')
imshow(warped_im*255, 'warped_im')
imshow(out_img, 'out_img')
imshow(result, 'result')
return out_img
def main_subset():
    # alternative entry point: process only test images 1-15, without error handling
    # (previously this shadowed main() below because both shared the same name)
    for i in range(1, 16):
        img = cv2.imread('igvc_sim_testset2/{}.png'.format(i), cv2.IMREAD_COLOR)
        result = process_img(img)
        cv2.imwrite("igvc_sim_testset_result/{}.png".format(i), result)
def main():
for i in range(68):
img = cv2.imread('igvc_sim_testset2/{}.png'.format(i), cv2.IMREAD_COLOR)
try:
result = process_img(img)
cv2.imwrite("igvc_sim_testset_result/{}.png".format(i), result)
except Exception as e:
print(e)
# break
if __name__ == '__main__':
main()
|
RockRaidersInc/ROS-Main
|
vision/lane_detection_test.py
|
Python
|
gpl-3.0
| 14,400
|
[
"Gaussian"
] |
ca030e469974505426269349128ed1dc4f0f986bb96dfec1f77a305277653051
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGenefilter(RPackage):
"""genefilter: methods for filtering genes from high-throughput experiments
Some basic functions for filtering genes."""
homepage = "https://bioconductor.org/packages/genefilter"
git = "https://git.bioconductor.org/packages/genefilter.git"
version('1.72.1', commit='b01b00a766982ef7d80b90a252085c8c4f085e1b')
version('1.72.0', commit='8cb0b2e73531a417d53e5625bcf436265cdbe101')
version('1.66.0', commit='1c4c471ccca873bf92dcf0b50f611eaa64c4f0cf')
version('1.64.0', commit='82e91b7751bae997b9c898c219ea201fd02a8512')
version('1.62.0', commit='eb119894f015c759f93f458af7733bdb770a22ad')
version('1.60.0', commit='c98f695253c330a9380b2b4ffa27f3b7d66773e4')
version('1.58.1', commit='ace2556049677f60882adfe91f8cc96791556fc2')
depends_on('r-biocgenerics@0.31.2:', when='@1.68.0:', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-annotate', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-s4vectors@0.9.42:', when='@:1.66.0', type=('build', 'run'))
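    # Hedged usage note (not part of the recipe): a pinned version can be
    # requested on the command line, e.g. `spack install r-genefilter@1.72.1`.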
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-genefilter/package.py
|
Python
|
lgpl-2.1
| 1,396
|
[
"Bioconductor"
] |
428e4f3e7482b83b8bab6d1d7204374e2536ced07d9f94472cda8d8ceb3fd69c
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Parameters for passing between functions."""
import logging
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Tuple, Union
import hoomd
from ..molecules import Molecule, Trimer
logger = logging.getLogger(__name__)
class SimulationParams(object):
"""Store the parameters of the simulation."""
defaults: Dict[str, Any] = {
'hoomd_args': '',
'step_size': 0.005,
'temperature': 0.4,
'tau': 1.0,
'pressure': 13.5,
'tauP': 1.0,
'cell_dimensions': (30, 42),
'outfile_path': Path.cwd(),
'max_gen': 500,
'gen_steps': 20000,
'output_interval': 10000,
}
def __init__(self, **kwargs) -> None:
"""Create SimulationParams instance."""
self.parameters: Dict[str, Any] = deepcopy(self.defaults)
self.parameters.update(kwargs)
    # I am using getattr over getattribute because of the lower search priority
    # of getattr. This makes it a fallback, rather than the primary location
    # for looking up attributes.
def __getattr__(self, key):
try:
return self.parameters.__getitem__(key)
except KeyError:
raise AttributeError
def __setattr__(self, key, value):
# setattr has a higher search priority than other functions, custom
# setters need to be added to the list below
if key in ['parameters']:
super().__setattr__(key, value)
else:
self.parameters.__setitem__(key, value)
def __delattr__(self, attr):
return self.parameters.__delitem__(attr)
@property
def temperature(self) -> Union[float, hoomd.variant.linear_interp]:
"""Temperature of the system."""
try:
return hoomd.variant.linear_interp([
(0, self.init_temp),
(int(self.num_steps*0.75), self.parameters.get('temperature', self.init_temp)),
(self.num_steps, self.parameters.get('temperature', self.init_temp)),
], zero='now')
except AttributeError:
return self.parameters.get('temperature')
@temperature.setter
def temperature(self, value: float) -> None:
self.parameters['temperature'] = value
@property
def molecule(self) -> Molecule:
"""Return the appropriate molecule.
Where there is no custom molecule defined then we return the molecule of
the crystal.
"""
if self.parameters.get('molecule') is not None:
mol = self.parameters.get('molecule')
elif self.parameters.get('crystal') is not None:
mol = self.crystal.molecule
else:
mol = Trimer()
return mol
    @property
    def cell_dimensions(self) -> Tuple[int, int]:
        # accessing self.crystal raises AttributeError when no crystal is set,
        # and that exception deliberately propagates to the caller
        self.crystal
        return self.parameters.get('cell_dimensions')
@property
def group(self) -> hoomd.group.group:
"""Return the appropriate group."""
if self.parameters.get('group'):
return self.parameters.get('group')
if self.molecule.num_particles == 1:
return hoomd.group.all()
return hoomd.group.rigid_center()
@property
def outfile_path(self) -> Path:
"""Ensure the output directory is a path."""
if self.parameters.get('outfile_path'):
return Path(self.parameters.get('outfile_path'))
return Path.cwd()
@property
def outfile(self) -> str:
"""Ensure the output file is a string."""
if self.parameters.get('outfile') is not None:
return str(self.parameters.get('outfile'))
raise AttributeError('Outfile does not exist')
def filename(self, prefix: str=None) -> str:
"""Use the simulation parameters to construct a filename."""
base_string = '{molecule}-P{pressure:.2f}-T{temperature:.2f}'
if prefix:
base_string = '{prefix}-' + base_string
if self.parameters.get('moment_inertia_scale') is not None:
base_string += '-I{mom_inertia:.2f}'
if self.parameters.get('space_group') is not None:
base_string += '-{space_group}'
fname = base_string.format(
prefix=prefix,
molecule=self.molecule,
pressure=self.pressure,
temperature=self.parameters.get('temperature'),
mom_inertia=self.parameters.get('moment_inertia_scale'),
            space_group=self.parameters.get('space_group'),
)
return str(self.outfile_path / fname)
class paramsContext(object):
"""Temporarily set parameter values with a context manager.
This is a context manager that can be used to temporarily set the values of a
SimulationParams instance. This simplifies the setup allowing for a single global
instance that is modified with every test. The modifications also make it clear
what is actually being tested.
"""
def __init__(self, sim_params: SimulationParams, **kwargs) -> None:
"""Initialise setValues class.
Args:
sim_params (class:`statdyn.simulation.params.SimulationParams`): The
instance that is to be temporarily modified.
Kwargs:
key: value
Any of the keys and values that are held by a SimulationParams instance.
"""
self.params = sim_params
self.modifications = kwargs
self.original = {key: sim_params.parameters.get(key)
for key in kwargs.keys()
if sim_params.parameters.get(key) is not None}
def __enter__(self) -> SimulationParams:
for key, val in self.modifications.items():
self.params.parameters[key] = val
logger.debug('Parameter on entry %s', str(self.params.parameters))
return self.params
def __exit__(self, exc_type, exc_value, traceback) -> None:
for key, _ in self.modifications.items():
del self.params.parameters[key]
self.params.parameters.update(self.original)
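# Illustrative sketch (not part of the original module): temporarily override
# parameters while building a filename; the values below are arbitrary.
def _example_param_override():
    params = SimulationParams(temperature=0.8, space_group='p2')
    with paramsContext(params, pressure=1.0, temperature=2.0) as sim_params:
        name = sim_params.filename(prefix='dump')
    assert params.pressure == 13.5  # the default is restored on exit
    return name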
|
malramsay64/MD-Molecules-Hoomd
|
statdyn/simulation/params.py
|
Python
|
mit
| 6,319
|
[
"CRYSTAL"
] |
559d13fe763d088069598bebe43ac1562d23771712c6a719708cee1eced28958
|
'''
Module of classes used to test M-LOOP.
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import numpy as np
import threading
import mloop.utilities as mlu
import numpy.random as nr
import logging
import os
import time
class TestLandscape():
'''
    Produces fake landscape data for testing. Default functions are set for each
    of the methods and can then be overridden.
    Keyword Args:
        num_params (Optional [int]): Number of parameters for the landscape. Defaults to 1.
'''
    def __init__(self, num_params=1):
self.log = logging.getLogger(__name__)
self.test_eval_num = 0
self.num_params = num_params
self.set_default_landscape()
def set_default_landscape(self):
'''
Set landscape functions to their defaults
'''
self.log.debug('Setting default landscapes')
self.cost_function = lambda p: np.sum(np.square(p))
self.noise_function = lambda p,c: (c,0)
self.bad_function = lambda p,c,u: (c,u,False)
self.expected_minima = np.zeros((self.num_params,))
def set_random_quadratic_landscape(self, min_region, max_region, random_scale=True, min_scale=0, max_scale=3):
'''
Make a quadratic function with a minimum randomly placed in a region with random scales
Args:
min_region (array): minimum region boundary
max_region (array): maximum region boundary
Keyword Args:
random_scale (Optional bool): If true randomize the scales of the parameters. Default True.
min_scale (float): Natural log of minimum scale factor. Default 0.
max_scale (float): Natural log of maximum scale factor. Default 3.
'''
        mini = min_region + nr.rand(self.num_params) * (max_region - min_region)
        if random_scale:
            scal = np.exp(min_scale + nr.rand(self.num_params) * (max_scale - min_scale))
        else:
            scal = None  # set_quadratic_landscape falls back to a scale of one
        self.set_quadratic_landscape(minimum=mini, scale=scal)
def set_quadratic_landscape(self, minimum = None, scale = None):
'''
Set deterministic part of landscape to be a quadratic.
with the formula::
            c(x) = \sum_i a_i (x_i - x_{0,i})^2
        where x_i are the parameters, x_{0,i} is the location of the minimum and a_i are the scaling factors.
        Keyword Args:
            minimum (Optional [array]): Location of the minimum. If set to None it is at the origin. Default None.
            scale (Optional [array]): Scaling of the quadratic along each dimension. If set to None the scaling is one.
'''
if minimum is None:
minimum = np.zeros((self.num_params,))
if scale is None:
scale = 1
self.cost_minimum = minimum
self.cost_scale = scale
self.cost_function = lambda p : np.sum(self.cost_scale*np.square(p - self.cost_minimum))
self.expected_minima = self.cost_minimum
self.log.debug('Test Minimum at:' + repr(self.cost_minimum))
self.log.debug('Test Scales are:' + repr(self.cost_scale))
self.log.debug('Test Cost minimum:' + repr(self.cost_function(p=self.cost_minimum)))
def set_noise_function(self, proportional=0.0, absolute=0.0):
'''
Adds noise to the function.
with the formula::
c'(c,x) = c (1 + s_p p) + s_a a
        where s_i are Gaussian random variables, p is the proportional noise factor, a is the absolute noise factor, and c is the cost before noise is added.
the uncertainty is then::
u = sqrt((cp)^2 + a^2)
Keyword Args:
proportional (Optional [float]): the proportional factor. Defaults to 0
absolute (Optional [float]): the absolute factor. Defaults to 0
'''
self.noise_prop = proportional
self.noise_abs = absolute
        self.noise_function = lambda p, c: (c * (1 + nr.normal() * self.noise_prop) + nr.normal() * self.noise_abs,
                                            np.sqrt((c * self.noise_prop)**2 + self.noise_abs**2))
def set_bad_region(self, min_boundary, max_boundary, bad_cost=None, bad_uncer=None):
'''
Adds a region to landscape that is reported as bad.
Args:
min_boundary (array): mininum boundary for bad region
max_boundary (array): maximum boundary for bad region
'''
self.bad_min_boundary = min_boundary
self.bad_max_boundary = max_boundary
if bad_cost is None and bad_uncer is None:
self.bad_function = lambda p,c,u : (c, u, np.all(p >= self.bad_min_boundary)&np.all(self.bad_max_boundary >= p))
elif bad_cost is not None and bad_uncer is not None:
self.bad_cost = bad_cost
self.bad_uncer = bad_uncer
self.bad_function = lambda p,c,u : (self.bad_cost,self.bad_uncer, True) if np.all(p >= self.bad_min_boundary)&np.all(self.bad_max_boundary >= p) else (c,u,False)
else:
            self.log.error('When defining bad region behavior, bad_cost and bad_uncer must both be None or both be set. bad_cost:' + repr(bad_cost) + '. bad_uncer:' + repr(bad_uncer))
raise ValueError
def get_cost_dict(self,params):
'''
Return cost from fake landscape given parameters.
Args:
params (array): Parameters to evaluate cost.
'''
        self.test_eval_num += 1
self.log.debug('Test function called, num:' + repr(self.test_eval_num))
mean_cost = self.cost_function(p=params)
(noise_cost, noise_uncer) = self.noise_function(c=mean_cost,p=params)
(final_cost, final_uncer, final_bad) = self.bad_function(c=noise_cost,u=noise_uncer,p=params)
return_dict = {}
return_dict['cost']=final_cost
return_dict['uncer']=final_uncer
return_dict['bad']=final_bad
return_dict['mean_cost']=mean_cost
return_dict['noise_cost']=noise_cost
return_dict['noise_uncer']=noise_uncer
return_dict['test_eval_num']=self.test_eval_num
return return_dict
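# Illustrative sketch (not from the original module): build a noisy quadratic
# landscape and evaluate it once; the numbers are arbitrary.
def _example_landscape():
    landscape = TestLandscape(num_params=2)
    landscape.set_quadratic_landscape(minimum=np.array([0.5, -0.5]),
                                      scale=np.array([1.0, 2.0]))
    landscape.set_noise_function(proportional=0.01, absolute=0.001)
    return landscape.get_cost_dict(np.array([0.0, 0.0]))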
class FakeExperiment(threading.Thread):
'''
Pretends to be an experiment and reads files and prints files based on the costs provided by a TestLandscape. Executes as a thread.
Keyword Args:
test_landscape (Optional TestLandscape): landscape to generate costs from.
        experiment_file_type (Optional [string]): currently supports 'txt', where the output is a text file with the parameters as a list of numbers, and 'mat', a MATLAB file containing the next parameters. Default is 'txt'.
Attributes
self.end_event (Event): Used to trigger end of experiment.
'''
def __init__(self,
test_landscape = None,
experiment_file_type=mlu.default_interface_file_type,
exp_wait = 0,
poll_wait = 1,
**kwargs):
super(FakeExperiment,self).__init__()
if test_landscape is None:
self.test_landscape = TestLandscape()
else:
self.test_landscape = test_landscape
self.log = logging.getLogger(__name__)
self.exp_wait = float(exp_wait)
self.poll_wait = float(poll_wait)
self.out_file_type = str(experiment_file_type)
self.in_file_type = str(experiment_file_type)
self.total_out_filename = mlu.default_interface_in_filename + '.' + self.out_file_type
self.total_in_filename = mlu.default_interface_out_filename + '.' + self.in_file_type
self.end_event = threading.Event()
        self.test_count = 0
def set_landscape(self,test_landscape):
'''
Set new test landscape.
Args:
test_landscape (TestLandscape): Landscape to generate costs from.
'''
self.test_landscape = test_landscape
def run(self):
'''
Implementation of file read in and out. Put parameters into a file and wait for a cost file to be returned.
'''
self.log.debug('Entering FakeExperiment loop')
while not self.end_event.is_set():
if os.path.isfile(self.total_in_filename):
time.sleep(mlu.filewrite_wait) #wait for file to be written
try:
in_dict = mlu.get_dict_from_file(self.total_in_filename, self.in_file_type)
except IOError:
self.log.warning('Unable to open ' + self.total_in_filename + '. Trying again.')
continue
except (ValueError,SyntaxError):
                    self.log.error('There is something wrong with the syntax or type of your file: ' + self.total_in_filename)
raise
os.remove(self.total_in_filename)
                self.test_count += 1
self.log.debug('Test exp evaluating cost. Num:' + repr(self.test_count))
try:
params = in_dict['params']
except KeyError as e:
self.log.error('You are missing ' + repr(e.args[0]) + ' from the in params dict you provided through the queue.')
raise
cost_dict = self.test_landscape.get_cost_dict(params)
time.sleep(self.exp_wait)
mlu.save_dict_to_file(cost_dict, self.total_out_filename, self.out_file_type)
else:
time.sleep(self.poll_wait)
self.log.debug('Ended FakeExperiment')
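# Illustrative sketch (not from the original module): run a FakeExperiment in
# the background and stop it shortly afterwards; timings are arbitrary.
def _example_fake_experiment():
    exp = FakeExperiment(exp_wait=0, poll_wait=0.1)
    exp.start()
    time.sleep(1)
    exp.end_event.set()
    exp.join()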
|
charmasaur/M-LOOP
|
mloop/testing.py
|
Python
|
mit
| 9,788
|
[
"Gaussian"
] |
a25581e893fe99d6c9ab8f288689f14cb5049853e7637108c565031bd9df4a50
|
#import urllib2
import urllib
import subprocess
import time
import os.path
import sys
import getopt
from Bio.PDB import *
import openbabel
import pybel
import yaml
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
import re
import os
from collections import Counter
import numpy as np
import collections
from math import pi, degrees
from operator import itemgetter, attrgetter, methodcaller
import shutil
AA = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
'CYS': 'C', 'GLN': 'Q', 'GLU': 'E', 'GLY': 'G',
'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V'}
HBD = {'H', 'K', 'N', 'Q', 'R', 'S', 'T', 'W', 'Y'}
HBA = {'D', 'E', 'H', 'N', 'Q', 'S', 'T', 'Y'}
NEGATIVE = {'D', 'E'}
POSITIVE = {'H', 'K', 'R'}
AROMATIC = {'TYR', 'TRP', 'PHE', 'HIS'}
CHARGEDAA = {'ARG', 'LYS', 'ASP', 'GLU'} # skip ,'HIS'
HYDROPHOBIC_AA = {'A', 'C', 'F', 'I', 'L', 'M', 'P', 'V', 'W', 'Y'}
projectdir = '/tmp/interactions/'
if not os.path.exists(projectdir):
os.makedirs(projectdir)
os.chmod(projectdir, 0o777)
tempdir = projectdir + 'temp/'
if not os.path.exists(tempdir):
os.makedirs(tempdir)
os.chmod(tempdir, 0o777)
ignore_het = ['NA', 'W'] # ignore sodium and water
radius = 5
hydrophob_radius = 4.5
debug = False
def fetch_pdb(id):
url = 'http://www.rcsb.org/pdb/files/%s.pdb' % id
return urllib.urlopen(url).read()
def check_unique_ligand_mol(filename):
# check that only HETATM are exported to file
f_in = open(filename, 'r')
tempstr = ''
check = []
ligandid = 0
chainid = 0
for line in f_in:
if line.startswith('HETATM'):
residue_number = line[22:26]
chain = line[21]
if (residue_number != ligandid and ligandid != 0) or (chain != chainid and chainid != 0):
continue
ligandid = residue_number
chainid = chain
tempstr += line
f_in.close()
f = open(filename, 'w')
f.write(tempstr)
f.close()
def check_pdb():
# check if PDB is there, otherwise fetch
if not os.path.exists(projectdir + 'pdbs/'):
os.makedirs(projectdir + 'pdbs/')
if not os.path.isfile(projectdir + 'pdbs/' + pdbname + '.pdb'):
pdbfile = fetch_pdb(pdbname)
temp_path = projectdir + 'pdbs/' + pdbname + '.pdb'
f = open(temp_path, 'w')
f.write(pdbfile)
f.close()
def checkdirs():
# check that dirs are there and have right permissions
directory = projectdir + 'results/' + pdbname
if os.path.exists(directory):
shutil.rmtree(directory)
directory = projectdir + 'results/' + pdbname + '/interaction'
if not os.path.exists(directory):
os.makedirs(directory)
os.chmod(directory, 0o777)
directory = projectdir + 'results/' + pdbname + '/ligand'
if not os.path.exists(directory):
os.makedirs(directory)
os.chmod(directory, 0o777)
directory = projectdir + 'results/' + pdbname + '/output'
if not os.path.exists(directory):
os.makedirs(directory)
os.chmod(directory, 0o777)
directory = projectdir + 'results/' + pdbname + '/png'
if not os.path.exists(directory):
os.makedirs(directory)
os.chmod(directory, 0o777)
directory = projectdir + 'results/' + pdbname + '/fragments'
if not os.path.exists(directory):
os.makedirs(directory)
os.chmod(directory, 0o777)
def find_ligand_full_names():
pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb'
f_in = open(pdbfile, 'r')
d = {}
for line in f_in:
if line.startswith('HETSYN'):
# need to fix bad PDB formatting where col4 and col5 are put
# together for some reason -- usually seen when the id is +1000
m = re.match("HETSYN[\s]+([\w]{3})[\s]+(.+)", line)
if (m):
d[m.group(1)] = m.group(2).strip()
return d
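# Illustrative only (not part of the original script): what the HETSYN regex
# above extracts from a synthetic record line.
def _example_hetsyn_parse():
    line = "HETSYN     HEM PROTOPORPHYRIN IX CONTAINING FE"
    m = re.match(r"HETSYN[\s]+([\w]{3})[\s]+(.+)", line)
    return (m.group(1), m.group(2).strip()) if m else None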
def fragment_library(ligand, atomvector, atomname, residuenr, chain, typeinteraction):
#if debug:
#print "Make fragment pdb file for ligand:", ligand, "atom vector", atomvector, "atomname", atomname, "residuenr from protein", residuenr, typeinteraction, 'chain', chain
residuename = 'unknown'
ligand_pdb = projectdir + 'results/' + pdbname + \
'/ligand/' + ligand + '_' + pdbname + '.pdb'
mol = pybel.readfile("pdb", ligand_pdb).next()
mol.removeh()
listofvectors = []
chain = chain.strip()
if atomvector is not None:
for atom in mol:
distance = (Vector(getattr(atom, 'coords')) - atomvector).norm()
if distance > 0.1:
continue
# print "Parent:",getattr(atom,'type'),getattr(atom,'idx')
# ,Vector(getattr(atom,'coords'))
listofvectors.append(Vector(getattr(atom, 'coords')))
for neighbour_atom in openbabel.OBAtomAtomIter(atom.OBAtom):
# print neighbour_atom.GetAtomicNum()
neighbor = pybel.Atom(neighbour_atom)
# print
# "Neighbour:",neighbour_atom.GetType(),Vector(getattr(neighbor,'coords'))
listofvectors.append(Vector(getattr(neighbor, 'coords')))
for neighbour_atom2 in openbabel.OBAtomAtomIter(neighbour_atom):
# print neighbour_atom.GetAtomicNum()
neighbor2 = pybel.Atom(neighbour_atom2)
# print
# "Neighbour2:",neighbour_atom2.GetType(),Vector(getattr(neighbor2,'coords'))
listofvectors.append(Vector(getattr(neighbor2, 'coords')))
#if debug:
#print "vectors:", listofvectors
pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb'
f_in = open(pdbfile, 'r')
tempstr = ''
for line in f_in:
if line.startswith('HETATM'):
atomvector = Vector(line[30:38], line[38:46], line[46:54])
residue_number = line[22:26]
tempchain = line[21]
skip = 1
for targetvector in listofvectors:
distance = (targetvector - atomvector).norm()
if distance < 0.1:
# print "FOUND!"
skip = 0
if skip == 1:
continue
elif line.startswith('ATOM'):
residue_number = line[22:26].strip()
tempchain = line[21].strip()
if residue_number != residuenr:
continue
if tempchain != chain:
continue
residuenr = residue_number
chain = tempchain
residuename = line[17:20].strip()
else:
continue # ignore all other lines
tempstr += line
filename = projectdir + 'results/' + pdbname + '/fragments/' + pdbname + "_" + ligand + \
"_" + residuename + residuenr + chain + "_" + \
atomname + "_" + typeinteraction + ".pdb"
# if debug:
# print filename
f_in.close()
f = open(filename, 'w')
f.write(tempstr)
f.close()
mol = pybel.readfile("pdb", filename).next()
mol.write("pdb", filename, overwrite=True)
return filename
def fragment_library_aromatic(ligand, atomvectors, residuenr, chain, ringnr):
# print "Make aromatic fragment pdb file for ligand:",ligand,"atom
# vectors",atomvectors,"residuenr from protein", residuenr
chain = chain.strip()
pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb'
residuename = ''
f_in = open(pdbfile, 'r')
tempstr = ''
for line in f_in:
if line.startswith('HETATM'):
atomvector = Vector(line[30:38], line[38:46], line[46:54])
skip = 1
for targetvector in atomvectors:
distance = (targetvector - atomvector).norm()
if distance < 0.1:
# print "FOUND!"
skip = 0
if skip == 1:
continue
elif line.startswith('ATOM'):
residue_number = line[22:26].strip()
tempchain = line[21].strip()
if residue_number != residuenr:
continue
if tempchain != chain:
continue
residuename = line[17:20].strip()
chain = tempchain
else:
continue # ignore all other lines
tempstr += line
filename = projectdir + 'results/' + pdbname + '/fragments/' + pdbname + "_" + ligand + \
"_" + residuename + str(residuenr) + chain + \
"_aromatic_" + str(ringnr) + ".pdb"
# print tempstr
f_in.close()
f = open(filename, 'w')
f.write(tempstr)
f.close()
return filename
def create_ligands_and_poseview():
class HetSelect(Select):
def accept_residue(self, residue):
if residue.get_resname().strip() == HETNAM:
return 1
else:
return 0
class ClassSelect(Select):
def accept_residue(self, residue):
if residue.get_parent().id == peptideligand:
return 1
else:
return 0
p = PDBParser(QUIET=True)
s = p.get_structure(pdbname, projectdir + 'pdbs/' +
pdbname + '.pdb') # Disable warnings
hetflag_done = {}
for model in s:
for chain in model:
for residue in chain:
hetresname = residue.get_resname()
# catch residues with hetflag
hetflag = residue.get_full_id()[3][0].strip()
hetflag = hetflag.replace("H_", "").strip()
#hetflag = hetflag.replace("W","")
#print(hetflag)
if peptideligand and chain.id==peptideligand:
hetflag= 'pep'
if peptideligand and chain.id!=peptideligand:
continue
if hetflag and hetflag not in ignore_het:
if not hetflag in hetflag_done:
hetflag_done[hetflag] = 1
HETNAM = hetflag
temp_path = projectdir + 'results/' + pdbname + \
'/ligand/' + HETNAM + '_' + pdbname + '.sdf'
ligand_pdb = projectdir + 'results/' + pdbname + \
'/ligand/' + HETNAM + '_' + pdbname + '.pdb'
ligand_sdf = projectdir + 'results/' + pdbname + \
'/ligand/' + HETNAM + '_' + pdbname + '.sdf'
ligand_inchi = projectdir + 'results/' + pdbname + \
'/ligand/' + HETNAM + '_' + pdbname + '.inchi'
ligand_poseview = projectdir + 'results/' + \
pdbname + '/png/' + pdbname + '_' + HETNAM + '.png'
ligand_png = projectdir + 'results/' + pdbname + '/png/' + HETNAM + '.png'
# if sdf not made, make it #Always make them for now
if not os.path.isfile(ligand_pdb) or 1 == 1:
io = PDBIO()
io.set_structure(s)
if peptideligand and chain.id==peptideligand:
io.save(ligand_pdb, ClassSelect())
else:
io.save(ligand_pdb, HetSelect())
check_unique_ligand_mol(ligand_pdb)
if len(list(pybel.readfile("pdb", ligand_pdb))) == 0:
continue
obConversion = openbabel.OBConversion()
obConversion.SetInAndOutFormats("pdb", "inchi")
obConversion.SetOptions(
"K", obConversion.OUTOPTIONS)
mol = openbabel.OBMol()
# Open Babel will uncompress automatically
obConversion.ReadFile(mol, ligand_pdb)
obConversion.WriteFile(mol, ligand_inchi)
inchikey = obConversion.WriteString(mol)
inchikeys[HETNAM] = inchikey.strip()
#smiles[HETNAM] = smile
smiles[HETNAM] = pybel.readfile(
"pdb", ligand_pdb).next().write("smi").split("\t")[0]
mol = pybel.readfile("pdb", ligand_pdb).next()
mol.OBMol.AddHydrogens(False, True, 7.4)
mol.write("pdb", ligand_pdb, overwrite=True)
obConversion = openbabel.OBConversion()
obConversion.SetInAndOutFormats("pdb", "sdf")
mol = openbabel.OBMol()
# Open Babel will uncompress automatically
obConversion.ReadFile(mol, ligand_pdb)
obConversion.WriteFile(mol, ligand_sdf)
# if png of ligand not made, make it
if not os.path.isfile(ligand_png):
m = Chem.MolFromMolFile(ligand_sdf)
# Draw.MolToFile(m,ligand_png)
# if interaction png not made, make it #SKIP poseview
# stuff
if not os.path.isfile(ligand_poseview) and 1 == 2:
cmd = "poseview -l " + ligand_sdf + " -p " + projectdir + \
"pdbs/" + pdbname + ".pdb -o " + ligand_poseview
#print('Running cmd ' + cmd)
proc = subprocess.Popen(
[cmd], stdout=subprocess.PIPE, shell=True)
while proc.poll() is None:
time.sleep(1)
#(out, err) = proc.communicate()
else:
# print "Already made
# Poseview:",pdbname+"_"+HETNAM+".png"
continue
# print "Done "+str(len(hetflag_done))
def addresiduestoligand(ligand, pdb, residuelist):
temp_path = projectdir + 'pdbs/' + pdb + '.pdb'
f_in = open(temp_path, 'r')
inserstr = ''
check = []
# print filename
ligandid = 0
chainid = 0
for line in f_in:
if line.startswith('ATOM'):
temp = line.split()
# need to fix bad PDB formatting where col4 and col5 are put
# together for some reason -- usually seen when the id is +1000
m = re.match("(\w)(\d+)", temp[4])
if (m):
temp[4] = m.group(1)
temp[5] = m.group(2)
aaname = temp[3] + temp[5] + temp[4]
if aaname in residuelist:
# print aaname
inserstr += line
# print inserstr
f_in.close()
# ligands/'+hetflag+'_'+pdbname+".pdb")
temp_path = projectdir + 'results/' + pdbname + \
'/ligand/' + ligand + '_' + pdb + '.pdb'
f_in = open(temp_path, 'r')
tempstr = ''
inserted = 0
for line in f_in:
if line.startswith('ATOM'):
temp = line.split()
if temp[2] == 'H':
continue # skip hydrogen in model
if (line.startswith('CONECT') or line.startswith('MASTER') or line.startswith('END')) and inserted == 0:
tempstr += inserstr
inserted = 1
tempstr += line
# print tempstr
# print tempstr
f_in.close()
f = open(projectdir + 'results/' + pdbname +
'/interaction/' + pdb + '_' + ligand + '.pdb', 'w')
f.write(tempstr)
f.close()
def get_ring_from_aa(residueid):
class AAselect(Select):
def accept_residue(self, residue):
# print residue.get_full_id()[3][1],residueid
if str(residue.get_full_id()[3][1]) == residueid:
return 1
else:
return 0
ptemp = PDBParser(QUIET=True) # disable warnings
stemp = ptemp.get_structure(
pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
temp_aa_id = residueid
io = PDBIO()
io.set_structure(stemp)
io.save(projectdir + 'temp/' + residueid + '.pdb', AAselect())
mol = pybel.readfile("pdb", projectdir + 'temp/' +
residueid + '.pdb').next()
# print hetflag
rings = getattr(mol, "OBMol").GetSSSR()
ringlist = []
for ring in rings:
center = Vector(0.0, 0.0, 0.0)
members = ring.Size()
if ring.IsAromatic():
atomlist = []
atomnames = []
atomvectors = []
for atom in mol:
if ring.IsMember(atom.OBAtom):
a_vector = Vector(getattr(atom, 'coords'))
center += a_vector
atomlist.append(atom.idx)
atomvectors.append(a_vector)
atomnames.append(getattr(atom, 'type'))
center = center / members
# ring normal: cross product of two in-plane vectors from the center
normal1 = center - atomvectors[0]
normal2 = center - atomvectors[2]
normal = Vector(np.cross([normal1[0],normal1[1],normal1[2]],[normal2[0],normal2[1],normal2[2]]))
ringlist.append([atomlist, center, normal, atomnames, atomvectors])
return ringlist
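# Worked sketch of the normal computation above (arbitrary numbers): for a
# flat ring with center c and member atoms a0 and a2, normal1 = c - a0 and
# normal2 = c - a2 both lie in the ring plane, so their cross product is
# perpendicular to the ring, e.g. np.cross([1,0,0],[0,1,0]) -> [0,0,1].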
def get_hydrogen_from_aa(residueid):
class AAselect(Select):
def accept_residue(self, residue):
# print residue.get_full_id()[3][1],residueid
if str(residue.get_full_id()[3][1]) == residueid:
return 1
else:
return 0
ptemp = PDBParser(QUIET=True)
stemp = ptemp.get_structure(
pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
temp_aa_id = residueid
io = PDBIO()
io.set_structure(stemp)
io.save(projectdir + 'temp/' + residueid + '.pdb', AAselect())
mol = pybel.readfile("pdb", projectdir + 'temp/' +
residueid + '.pdb').next()
mol.OBMol.AddHydrogens(False, True, 7.4)
# print hetflag
donors = []
for atom in mol:
if getattr(atom, 'OBAtom').IsHbondDonor():
chargevector = Vector(getattr(atom, 'coords'))
# print getattr(atom,'type')," is Donor",chargevector
temphatoms = []
for neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom):
neighbor = pybel.Atom(neighbor)
if getattr(neighbor, 'type') == "H":
# print "neighbor
# Atom",getattr(neighbor,'type'),"Coords:",getattr(neighbor,'coords')
temphatoms.append(Vector(getattr(neighbor, 'coords')))
donors.append([getattr(atom, 'type'), chargevector, temphatoms,getattr(atom, 'OBAtom').IsHbondAcceptor()])
if getattr(atom, 'OBAtom').IsHbondAcceptor():
chargevector = Vector(getattr(atom, 'coords'))
#print getattr(atom, 'type'),chargevector,'acceptor!'
return donors
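# Shape of the returned donor list, for orientation (hypothetical values):
#   [['N3', Vector(x,y,z), [Vector(hx,hy,hz), ...], True], ...]
# i.e. [atom type, heavy-atom position, positions of attached hydrogens,
# whether the same atom can also act as an acceptor].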
def build_ligand_info():
count_atom_ligand = {}
p = PDBParser(QUIET=True)
s = p.get_structure(pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
for model in s:
for chain in model:
for residue in chain:
hetresname = residue.get_resname()
# catch residues with hetflag
hetflag = residue.get_full_id()[3][0].strip()
hetflag = hetflag.replace("H_", "").strip()
#hetflag = hetflag.replace("W","")
if peptideligand and chain.id==peptideligand:
hetflag= 'pep'
if peptideligand and chain.id!=peptideligand:
continue
if hetflag and hetflag not in ignore_het:
# if goodhet!='' and hetflag!=goodhet and
# "H_"+goodhet!=hetflag: continue ### Only look at the
# ligand that has an image from poseview made for it.
if hetflag not in hetlist or (peptideligand and chain.id==peptideligand):
if len(list(pybel.readfile("pdb", projectdir + 'results/' + pdbname + '/ligand/' + hetflag + '_' + pdbname + '.pdb'))) == 0:
# This ligand has no molecules
# print('no info for',hetflag)
continue
if hetflag not in hetlist: #do not recreate for peptides
hetlist[hetflag] = []
ligand_charged[hetflag] = []
ligand_donors[hetflag] = []
ligand_acceptors[hetflag] = []
count_atom_ligand[hetflag] = 0
mol = pybel.readfile(
"pdb", projectdir + 'results/' + pdbname + '/ligand/' + hetflag + '_' + pdbname + ".pdb").next()
# print "LIGAND",hetflag
rings = getattr(mol, "OBMol").GetSSSR()
# http://python.zirael.org/e-openbabel4.html
ringlist = []
for ring in rings:
center = Vector(0.0, 0.0, 0.0)
members = ring.Size()
if ring.IsAromatic():
# print "Found an aromatic ring"
atomlist = []
atomnames = []
vectorlist = []
for atom in mol:
if ring.IsMember(atom.OBAtom):
# print atom.idx,getattr(atom,'type'),
# ring.IsMember( atom.OBAtom)
a_vector = Vector(
getattr(atom, 'coords'))
center += a_vector
atomlist.append(atom.idx)
vectorlist.append(a_vector)
atomnames.append(getattr(atom, 'type'))
center = center / members
# ring normal: cross product of two in-plane vectors from the center
normal1 = center - vectorlist[0]
normal2 = center - vectorlist[2]
normal = Vector(np.cross([normal1[0],normal1[1],normal1[2]],[normal2[0],normal2[1],normal2[2]]))
ringlist.append(
[atomlist, center, normal, atomnames, vectorlist])
ligand_rings[hetflag] = ringlist
for atom in mol:
#print "Atom",getattr(atom,'type'),"Coords:",getattr(atom,'coords'),"FormalCharge:",getattr(atom,'formalcharge'),"PartialCharge",getattr(atom,'partialcharge')
if getattr(atom, 'formalcharge') != 0:
chargevector = Vector(getattr(atom, 'coords'))
ligand_charged[hetflag].append(
[getattr(atom, 'type'), chargevector, getattr(atom, 'formalcharge')])
if getattr(atom, 'OBAtom').IsCarboxylOxygen():
chargevector = Vector(getattr(atom, 'coords'))
# print getattr(atom,'type')," is
# CarboxylOxygen",chargevector
ligand_charged[hetflag].append(
[getattr(atom, 'type'), chargevector, -1])
if getattr(atom, 'OBAtom').IsHbondDonor():
chargevector = Vector(getattr(atom, 'coords'))
# print getattr(atom,'type')," is
# Donor",chargevector
temphatoms = []
for neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom):
neighbor = pybel.Atom(neighbor)
if getattr(neighbor, 'type') == "H":
# print "neighbor
# Atom",getattr(neighbor,'type'),"Coords:",getattr(neighbor,'coords')
temphatoms.append(
Vector(getattr(neighbor, 'coords')))
ligand_donors[hetflag].append(
[getattr(atom, 'type'), chargevector, temphatoms])
if getattr(atom, 'OBAtom').IsHbondAcceptor():
chargevector = Vector(getattr(atom, 'coords'))
# print getattr(atom,'type')," is Acceptor",chargevector
ligand_acceptors[hetflag].append([getattr(atom, 'type'), chargevector])
# ligand_charged[hetflag].append([getattr(atom,'type'),chargevector,-1])
# Function to get ligand centers to maybe skip some
# residues
check = 0
center = Vector(0.0, 0.0, 0.0)
if peptideligand and chain.id==peptideligand:
if hetflag in ligandcenter:
center = ligandcenter[hetflag][2]
for atom in residue:
het_atom = atom.name
atom_vector = atom.get_vector()
center += atom_vector
hetlist[hetflag].append(
[hetresname, het_atom, atom_vector])
if not hetflag in ligand_atoms:
# make the ligand_atoms ready
ligand_atoms[hetflag] = []
ligand_atoms[hetflag].append(
[count_atom_ligand[hetflag], atom_vector, het_atom])
count_atom_ligand[hetflag] += 1
# same layout as the non-peptide branch below:
# [normalized center, atom count, running coordinate sum]
ligandcenter[hetflag] = [center / count_atom_ligand[hetflag], count_atom_ligand[hetflag], center]
else:
for atom in residue:
if check == 0 and hetflag in ligand_atoms:
continue # skip when there are many of same ligand
het_atom = atom.name
check = 1
atom_vector = atom.get_vector()
center += atom_vector
hetlist[hetflag].append(
[hetresname, het_atom, atom_vector])
if not hetflag in ligand_atoms:
# make the ligand_atoms ready
ligand_atoms[hetflag] = []
ligand_atoms[hetflag].append(
[count_atom_ligand[hetflag], atom_vector, het_atom])
count_atom_ligand[hetflag] += 1
center2 = center / count_atom_ligand[hetflag]
ligandcenter[hetflag] = [
center2, count_atom_ligand[hetflag],center]
def remove_hyd(aa,ligand):
templist = []
for res in new_results[ligand]['interactions']:
#print res[0],res[2],aa
if res[0]==aa and (res[2]=='HYD' or res[2]=='hyd'):
continue
else:
templist.append(res)
new_results[ligand]['interactions'] = templist
def check_other_aromatic(aa,ligand,info):
templist = []
check = True
for res in new_results[ligand]['interactions']:
#print res[0],res[2],aa
if res[0]==aa and res[4]=='aromatic':
#if the new aromatic interaction has a center-center distance greater than the old one, keep old.
if info['Distance']>res[6]['Distance']:
templist.append(res)
check = False #Do not add the new one.
else: #if not, delete the old one, as the new is better.
check = True #add the new one
continue
else:
templist.append(res)
new_results[ligand]['interactions'] = templist
return check
# LOOP OVER RECEPTOR AND FIND INTERACTIONS
def find_interactions():
global count_calcs, count_skips
count_atom = 0
count_skips = 0
count_calcs = 0
p = PDBParser(QUIET=True)
s = p.get_structure(pdbname, projectdir + 'pdbs/' + pdbname + '.pdb')
for model in s:
for chain in model:
chainid = chain.get_id()
if peptideligand and chainid==peptideligand:
continue
for residue in chain:
aa_resname = residue.get_resname()
aa_seqid = str(residue.get_full_id()[3][1])
hetflagtest = str(residue.get_full_id()[3][0]).strip()
aaname = aa_resname + aa_seqid + chainid
hetflagtest = hetflagtest.replace("H_", "")
#hetflagtest = hetflagtest.replace("W","")
if hetflagtest:
continue # residue is a heteroatom (ligand) record, not part of the receptor
# print "Looking at ",aa_resname,aa_seqid,chainid
countresidue = count_atom
# print aaname
# could probably make a check here to see if this residue was
# anywhere near the ligand, otherwise skip the check per atom
for hetflag, atomlist in hetlist.iteritems():
if not 'CA' in residue: # prevent errors
continue
ca = residue['CA'].get_vector()
if (ca - ligandcenter[hetflag][0]).norm() > ligandcenter[hetflag][1]:
# print "skipping"
count_skips += 1
continue
count_atom = countresidue
sum = 0
hydrophobic_count = 0
accesible_check = 0
# if goodhet!='' and hetflag!=goodhet and
# "H_"+goodhet!=hetflag: continue ### Only look at the
# ligand that has an image from poseview made for it.
tempdistance = radius
for atom in atomlist:
#print(hetflag,atom)
hetresname = atom[0]
het_atom = atom[1]
het_vector = atom[2]
hydrophobic_check = 1
aaatomlist = []
for atom in residue:
count_atom += 1
aa_vector = atom.get_vector()
aa_atom = atom.name
aa_atom_type = atom.element
aaatomlist.append([count_atom, aa_vector, aa_atom])
d = (het_vector - aa_vector)
count_calcs += 1
if d.norm() < radius:
if not hetflag in results:
results[hetflag] = {}
summary_results[hetflag] = {'score': [], 'hbond': [], 'hbondplus': [],
'hbond_confirmed': [], 'aromatic': [],'aromaticff': [],
'ionaromatic': [], 'aromaticion': [], 'aromaticef': [],
'aromaticfe': [], 'hydrophobic': [], 'waals': [], 'accessible':[]}
new_results[hetflag] = {'interactions':[]}
if not aaname in results[hetflag]:
results[hetflag][aaname] = []
if not (het_atom[0] == 'H' or aa_atom[0] == 'H' or aa_atom_type=='H'):
#print(aa_atom_type)
results[hetflag][aaname].append([het_atom, aa_atom, round(
d.norm(), 2), het_vector, aa_vector, aa_seqid, chainid])
tempdistance = round(d.norm(), 2)
sum += 1
# if both atoms are carbon then we count a hydrophobic
# interaction
if het_atom[0] == 'C' and aa_atom[0] == 'C' and d.norm() < hydrophob_radius and hydrophobic_check:
hydrophobic_count += 1
hydrophobic_check = 0
if d.norm() < 5 and (aa_atom!='C' and aa_atom!='O' and aa_atom!='N'):
#print(aa_atom)
accesible_check = 1
if accesible_check: #if accessible!
summary_results[hetflag]['accessible'].append(
[aaname])
fragment_file = fragment_library(hetflag, None, '',
aa_seqid, chainid, 'access')
new_results[hetflag]['interactions'].append([aaname,fragment_file,'acc','accessible','hidden',''])
if hydrophobic_count > 2 and AA[aaname[0:3]] in HYDROPHOBIC_AA: # min 3 c-c interactions
summary_results[hetflag]['hydrophobic'].append(
[aaname, hydrophobic_count])
fragment_file = fragment_library(hetflag, None, '',
aa_seqid, chainid, 'hydrop')
new_results[hetflag]['interactions'].append([aaname,fragment_file,'hyd','hydrophobic','hydrophobic',''])
if sum > 1 and aa_resname in AROMATIC:
# if debug:
# , get_ring_atoms(aaatomlist)
# print "Need to analyse aromatic ring in ", aaname
aarings = get_ring_from_aa(aa_seqid)
if not aarings:
# print "Could not find aromatic ring in",aaname
continue
#print "amount of rings in AA",len(aarings)
for aaring in aarings:
#aaring = aaring[0] # res_ring
center = aaring[1]
count = 0
#print "AARING",aaring
for ring in ligand_rings[hetflag]:
# print ring
shortest_center_het_ring_to_res_atom = 10
shortest_center_aa_ring_to_het_atom = 10
# print aaring[4]
# print ring[4]
for a in aaring[4]:
if (ring[1] - a).norm() < shortest_center_het_ring_to_res_atom:
shortest_center_het_ring_to_res_atom = (ring[1] - a).norm()
for a in ring[4]:
if (center - a).norm() < shortest_center_aa_ring_to_het_atom:
shortest_center_aa_ring_to_het_atom = (center - a).norm()
count += 1
# take vector from two centers, and compare against
# vector from center to outer point -- this will
# give the perpendicular angle.
angle = Vector.angle(center - ring[1], ring[2]) #aacenter to ring center vs ring normal
# take vector from two centers, and compare against
# vector from center to outer point -- this will
# give the perpendicular angle.
angle2 = Vector.angle(center - ring[1], aaring[2]) #aacenter to ring center vs AA normal
angle3 = Vector.angle(ring[2], aaring[2]) #two normal vectors against eachother
#print "angleaa",aaring[2],"anglelig",ring[2]
angle_degrees = [
round(degrees(angle), 1), round(degrees(angle2), 1), round(degrees(angle3), 1)]
distance = (center - ring[1]).norm()
#if debug:
#print aaname,"Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', shortest_center_het_ring_to_res_atom, 'Shortest lig->rescenter', shortest_center_aa_ring_to_het_atom
if distance < 5 and (angle_degrees[2]<20 or abs(angle_degrees[2]-180)<20): # poseview uses <5
# print "Ring
# #",count,"Distance:",round(distance,2),
# "Angle:",round(angle_degrees,2)
summary_results[hetflag]['aromatic'].append(
[aaname, count, round(distance, 2), angle_degrees])
fragment_file = fragment_library_aromatic(
hetflag, ring[4], aa_seqid, chainid, count)
if debug:
print aaname,"F2F Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2)
if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}):
new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_ff','aromatic (face-to-face)','aromatic','none',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}])
remove_hyd(aaname,hetflag)
# need to be careful for edge-edge
elif (shortest_center_aa_ring_to_het_atom < 4.5) and abs(angle_degrees[0]-90)<30 and abs(angle_degrees[2]-90)<30:
summary_results[hetflag]['aromaticfe'].append(
[aaname, count, round(distance, 2), angle_degrees])
fragment_file = fragment_library_aromatic(
hetflag, ring[4], aa_seqid, chainid, count)
if debug:
print aaname,"FE Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2)
if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}):
new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_fe_protein','aromatic (face-to-edge)','aromatic','protein',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}])
remove_hyd(aaname,hetflag)
# need to be careful for edge-edge
elif (shortest_center_het_ring_to_res_atom < 4.5) and abs(angle_degrees[1]-90)<30 and abs(angle_degrees[2]-90)<30:
summary_results[hetflag]['aromaticef'].append(
[aaname, count, round(distance, 2), angle_degrees])
fragment_file = fragment_library_aromatic(
hetflag, ring[4], aa_seqid, chainid, count)
if debug:
print aaname,"EF Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2)
if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}):
new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_ef_protein','aromatic (edge-to-face)','aromatic','protein',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}])
remove_hyd(aaname,hetflag)
for charged in ligand_charged[hetflag]:
distance = (center - charged[1]).norm()
# needs max 4.2 distance to make aromatic+
if distance < 4.2 and charged[2] > 0:
if debug:
print "Ring #", count, "Distance:", round(distance, 2), "Angle:", round(angle_degrees, 2)
summary_results[hetflag]['aromaticion'].append(
[aaname, count, round(distance, 2), charged])
#FIXME fragment file
new_results[hetflag]['interactions'].append([aaname,'','aro_ion_protein','aromatic (pi-cation)','aromatic','protein',{'Distance':round(distance, 2)}])
remove_hyd(aaname,hetflag)
if sum > 2 and aa_resname in CHARGEDAA and ligand_rings[hetflag]:
# print "check for charged AA to aromatic
# rings!",aa_resname,hetflag
for atom in residue:
aa_vector = atom.get_vector()
aa_atom = atom.name
for ring in ligand_rings[hetflag]:
d = (ring[2] - aa_vector).norm()
# if d<10: print
# "aa_atom",aa_atom,aaname,"distance to a
# ring",d,hetflag,aa_resname
def analyze_interactions():
for ligand, result in results.iteritems():
# print "AA close to ligands ("+ligand+"): ",list(result.keys())
# print "Results for"+ligand
sortedresults = []
ligscore = 0
for residue, interaction in result.iteritems():
sum = 0
score = 0
hbond = []
hbondplus = []
type = 'waals'
for entry in interaction:
hbondconfirmed = []
if entry[2] <= 3.5:
# print(entry)
# if debug:
# print "Likely H-Bond", entry
if entry[0][0] == 'C' or entry[1][0] == 'C':
continue # If either atom is C then no hydrogen bonding
# if entry[1] == 'N': #if residue atom is N, then it is backbone!
# print('backbone interaction!')
aa_donors = get_hydrogen_from_aa(entry[5])
hydrogenmatch = 0
res_is_acceptor = False
res_is_donor = False
for donor in aa_donors:
d = (donor[1] - entry[4]).norm()
if d < 0.5:
#print 'found donor in residue',residue,entry,donor
hydrogens = donor[2]
res_is_acceptor = donor[3]
res_is_donor = True
for hydrogen in hydrogens:
hydrogenvector = hydrogen - donor[1]
bindingvector = entry[3] - hydrogen
angle = round(degrees(Vector.angle(
hydrogenvector, bindingvector)), 2)
distance = round(bindingvector.norm(), 2)
# print "RESDONOR",residue,"From
# ligand",entry[0],"To
# AA",entry[1],"HydrogenCheck
# angle",angle,"Distance from hydrogen to
# acceptor",distance
if distance > 2.5:
# print "Too far away"
continue
if angle > 60:
# print "Bad angle"
continue
hydrogenmatch = 1
hbondconfirmed.append(
["D", entry[0], entry[1], angle, distance])
# print "aadonors:",aa_donors
found_donor = 0
for donor in ligand_donors[ligand]:
d = (donor[1] - entry[3]).norm()
# print charged,d,residue,entry
if d < 0.5:
found_donor = 1
hydrogens = donor[2]
for hydrogen in hydrogens:
hydrogenvector = hydrogen - donor[1]
bindingvector = entry[4] - hydrogen
angle = round(degrees(Vector.angle(
hydrogenvector, bindingvector)), 2)
distance = round(bindingvector.norm(), 2)
# print "LIGDONOR",residue,"From
# ligand",entry[0],"To
# AA",entry[1],"HydrogenCheck
# angle",angle,"Distance from hydrogen to
# acceptor",distance
if distance > 2.5:
# print "Too far away"
continue
if angle > 60:
# print "Bad angle"
continue
hydrogenmatch = 1
hbondconfirmed.append(
["A", entry[0], entry[1], angle, distance])
found_acceptor = 0
for acceptor in ligand_acceptors[ligand]:
d = (acceptor[1] - entry[3]).norm()
# print charged,d,residue,entry
if d < 0.5:
found_acceptor = 1
if found_donor==0 and res_is_donor:
hydrogenmatch = 1
hbondconfirmed.append(['D']) #set residue as donor
#print 'found acceptor which is not donor',residue,entry[0],acceptor
if not found_acceptor and found_donor and res_is_acceptor:
hydrogenmatch = 1
hbondconfirmed.append(['A']) #set residue as acceptor
#print 'donor which is not acceptor',residue,entry[0]
if found_acceptor and found_donor:
if res_is_donor and not res_is_acceptor:
hydrogenmatch = 1
hbondconfirmed.append(['D'])
elif not res_is_donor and res_is_acceptor:
hydrogenmatch = 1
hbondconfirmed.append(['A'])
else:
pass
#print 'can be both donor and acceptor'
chargedcheck = 0
charge_value = 0
res_charge_value = 0
doublechargecheck = 0
for charged in ligand_charged[ligand]:
d = (charged[1] - entry[3]).norm()
if d < 0.5:
# print 'found charge',residue,d,entry
chargedcheck = 1
hydrogenmatch = 0 # Replace previous match!
charge_value = charged[2]
if residue[0:3] in CHARGEDAA:
# print "check for hbondplus!",residue,entry
# Need to check which atoms, but for now assume charged
if chargedcheck:
doublechargecheck = 1
chargedcheck = 1
hydrogenmatch = 0 # Replace previous match!
if AA[residue[0:3]] in POSITIVE:
res_charge_value = 1
elif AA[residue[0:3]] in NEGATIVE:
res_charge_value = -1
if entry[1] == 'N': #backbone connection!
fragment_file = fragment_library(ligand, entry[3], entry[
0], entry[5], entry[6], 'HB_backbone')
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_backbone','polar (hydrogen bond with backbone)','polar','protein',entry[0],entry[1],entry[2]])
remove_hyd(residue,ligand)
elif entry[1] == 'O': #backbone connection!
fragment_file = fragment_library(ligand, entry[3], entry[
0], entry[5], entry[6], 'HB_backbone')
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_backbone','polar (hydrogen bond with backbone)','polar','protein',entry[0],entry[1],entry[2]])
remove_hyd(residue,ligand)
elif hydrogenmatch:
found = 0
fragment_file = fragment_library(ligand, entry[3], entry[
0], entry[5], entry[6], 'HB')
for x in summary_results[ligand]['hbond_confirmed']:
if residue == x[0]:
# print "Already key there",residue
key = summary_results[ligand][
'hbond_confirmed'].index(x)
summary_results[ligand]['hbond_confirmed'][
key][1].extend(hbondconfirmed)
found = 1
if hbondconfirmed[0][0]=="D":
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_donor_protein','polar (hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
remove_hyd(residue,ligand)
if hbondconfirmed[0][0]=="A":
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_acceptor_protein','polar (hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
remove_hyd(residue,ligand)
if found == 0:
summary_results[ligand]['hbond_confirmed'].append(
[residue, hbondconfirmed])
if chargedcheck:
type = 'hbondplus'
hbondplus.append(entry)
elif chargedcheck:
type = 'hbondplus'
hbondplus.append(entry)
fragment_file = fragment_library(ligand, entry[3], entry[
0], entry[5], entry[6], 'HBC')
remove_hyd(residue,ligand)
if doublechargecheck:
if (res_charge_value>0):
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_double_pos_protein','polar (charge-charge)','polar','',entry[0],entry[1],entry[2]])
elif (res_charge_value<0):
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_double_neg_protein','polar (charge-charge)','polar','',entry[0],entry[1],entry[2]])
elif (charge_value>0):
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_pos_ligand','polar (charge-assisted hydrogen bond)','polar','ligand',entry[0],entry[1],entry[2]])
elif (charge_value<0):
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_neg_ligand','polar (charge-assisted hydrogen bond)','polar','ligand',entry[0],entry[1],entry[2]])
else:
if (res_charge_value>0):
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_pos_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
elif (res_charge_value<0):
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_neg_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
else:
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_unknown_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]])
else:
type = 'hbond'
hbond.append(entry)
fragment_file = fragment_library(ligand, entry[3], entry[
0], entry[5], entry[6], 'HB')
new_results[ligand]['interactions'].append([residue,fragment_file,'polar_unspecified','polar (hydrogen bond)','polar','',entry[0],entry[1],entry[2]])
remove_hyd(residue,ligand)
#print type,hbondconfirmed
entry[3] = ''
if (entry[2] < 4.5):
sum += 1
score += 4.5 - entry[2]
score = round(score, 2)
if type == 'waals' and score > 2: # mainly no hbond detected
summary_results[ligand]['waals'].append([residue, score, sum])
elif type == 'hbond':
summary_results[ligand]['hbond'].append(
[residue, score, sum, hbond])
elif type == 'hbondplus':
summary_results[ligand]['hbondplus'].append(
[residue, score, sum, hbondplus])
# elif type == 'hbond_confirmed':
# summary_results[ligand]['hbond_confirmed'].append([residue,score,sum,hbondconfirmed])
ligscore += score
# print "Total <4 (score is combined diff from
# 4)",sum,"score",score
sortedresults.append([residue, score, sum, hbond, type])
summary_results[ligand]['score'].append([ligscore])
summary_results[ligand]['inchikey'] = inchikeys[ligand]
summary_results[ligand]['smiles'] = smiles[ligand]
new_results[ligand]['score'] = ligscore
new_results[ligand]['inchikey'] = inchikeys[ligand]
new_results[ligand]['smiles'] = smiles[ligand]
if ligand in hetlist_display:
summary_results[ligand]['prettyname'] = hetlist_display[ligand]
new_results[ligand]['prettyname'] = hetlist_display[ligand]
# print ligand,"Ligand score:"+str(ligscore)
sortedresults = sorted(sortedresults, key=itemgetter(1), reverse=True)
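# Shape of one new_results 'interactions' entry appended above
# (hypothetical values):
#   ['ASP113A', '<fragment.pdb>', 'polar_donor_protein',
#    'polar (hydrogen bond)', 'polar', 'protein', 'N1', 'OD1', 2.87]
# i.e. residue, fragment file, short type code, readable label, interaction
# class, which side carries the feature, ligand atom, residue atom, distance.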
def pretty_results():
for ligand, result in summary_results.iteritems():
output = ''
bindingresidues = []
#output += "Results for "+str(ligand)+"\n"
for type, typelist in result.iteritems():
if type == 'waals':
continue
output += type + "\n"
if type == 'waals':
typelist = sorted(typelist, key=itemgetter(2), reverse=True)
if type == 'hydrophobic':
typelist = sorted(typelist, key=itemgetter(1), reverse=True)
for entry in typelist:
if type != 'score':
bindingresidues.append(entry[0])
if type == 'hbond':
output += '\t'.join(map(str, entry[0:1])) + '\n'
for bond in entry[3]:
output += '\t'.join(map(str, bond[0:3])) + '\n'
elif type == 'hbondplus':
output += '\t'.join(map(str, entry[0:1])) + '\n'
for bond in entry[3]:
output += '\t'.join(map(str, bond[0:3])) + '\n'
elif type == 'hbond_confirmed':
output += '\t'.join(map(str, entry[0:1])) + '\n'
for bond in entry[1]:
output += '\t'.join(map(str, bond)) + '\n'
else:
# print entry
output += '\t'.join(map(str, entry)) + '\n'
temp_path = projectdir + 'results/' + pdbname + '/output/' + \
pdbname + '_' + ligand.replace("H_", "") + '.yaml'
# yaml.dump(result, open(temp_path, 'w'))
yaml.dump(new_results[ligand], open(temp_path, 'w'))
if debug:
print ligand,'\n',open(temp_path,'r').read()
addresiduestoligand(ligand, pdbname, bindingresidues)
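# Per-ligand output written above (paths relative to projectdir):
#   results/<pdb>/output/<pdb>_<ligand>.yaml        the new_results record
#   results/<pdb>/interaction/<pdb>_<ligand>.pdb    ligand plus binding residues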
def calculate_interactions(pdb, session=None, peptide=None):
global pdbname, hetlist, hetlist_display, ligand_atoms, ligand_charged, ligandcenter, ligand_rings, ligand_donors, ligand_acceptors, results, sortedresults, summary_results, inchikeys, smiles, projectdir, new_results, peptideligand
hetlist = {}
hetlist_display = {}
ligand_atoms = {}
ligand_charged = {}
ligandcenter = {}
ligand_rings = {}
ligand_donors = {}
ligand_acceptors = {}
results = {}
sortedresults = {}
summary_results = {}
new_results = {}
inchikeys = {}
smiles = {}
peptideligand = peptide
if not session:
pdbname = pdb
# print "checking normal ",pdbname
check_pdb()
checkdirs()
hetlist_display = find_ligand_full_names()
create_ligands_and_poseview()
build_ligand_info()
find_interactions()
analyze_interactions()
pretty_results()
else:
pdbname = pdb
projectdir = '/tmp/interactions/' + session + "/"
checkdirs()
hetlist_display = find_ligand_full_names()
create_ligands_and_poseview()
build_ligand_info()
find_interactions()
analyze_interactions()
pretty_results()
def main(argv):
pdbname = ''
try:
# print 'ARGV :', argv
opts, args = getopt.getopt(argv, "p:s:c:", ["pdb"])
except getopt.GetoptError as err:
print "Remember PDB name -p "
print err
sys.exit(2)
session = None
peptide = None
for opt, arg in opts:
if opt in ("-p"):
pdbname = arg
elif opt in ("-s"):
session = arg
elif opt in ("-c"):
peptide = arg
if not pdbname:
print "Remember PDB name -p "
sys.exit(2)
if session:
calculate_interactions(pdbname, session, peptide=peptide)
else:
calculate_interactions(pdbname, peptide=peptide)
if __name__ == "__main__":
main(sys.argv[1:])
#pdbname = '1F88'
# calculate_interactions(pdbname)
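# Command-line usage (sketch; flags as parsed by main() above):
#   python legacy_functions.py -p 1F88                # analyse pdbs/1F88.pdb
#   python legacy_functions.py -p 1F88 -s <session>   # work under /tmp/interactions/<session>/
#   python legacy_functions.py -p 1F88 -c B           # treat chain B as a peptide ligand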
|
cmunk/protwis
|
interaction/legacy_functions.py
|
Python
|
apache-2.0
| 61,900
|
[
"Open Babel",
"Pybel",
"RDKit"
] |
7725aaf2411b7177c3cf824d107ff389e46b6732f9a1f898411606a4be74cf77
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import re
from helpers import htmlentityhelper
class Algorithms:
"""
Contains all algorithms for decoding urls to media-urls
"""
#===========================================================================
def ExtractMediaUrl(self, url, data):
"""
returns the full media url for the given url and code
"""
#========================================================= Megavideo.com
if url.find("megavideo.com") > 0:
#the usual decode which is:
# a replacement of the
#"flv","UIIM%07%12%12JJJ%0D%08%13PXZ%5CKTYXR%13%5ERP%12%5BTQXN%12X%0DX%5E%09%0CY%0B%5E%05%0B%04X%08%04%0F%0A%0C%0E%08%5E%5E%0C%04%08Y%5B%04%0B%0C%0D%0D%12
# http: / / www0 5 . mega video. c om/ f iles/ e0 ec 4 1 d6 c 8 6 9 e5 9 2 7 1 3 5 c c 1 9 5 df 9 6 1 0 0 /
#http://www05.megavideo.com/files/e0ec41d6c869e5927135cc195df96100/
codeRegex = 'addVariable\("flv","([^"]+)"\)'
codeResults = re.findall(codeRegex, data, re.DOTALL + re.IGNORECASE)
if len(codeResults) > 0:
code = codeResults[-1]
dictionary = {"_": "b", "I": "t", "J": "w", "K": "v", "M": "p", "N": "s", "P": "m", "Q": "l", "R": "o", "T": "i", "U": "h", "X": "e", "Y": "d", "Z": "g", "%04": "9", "%05": "8", "%07": ":", "%08": "5", "%09": "4", "%0A": "7", "%0B": "6", "%0C": "1", "%0D": "0", "%0E": "3", "%0F": "2", "%12": "/", "%13": ".", "%5B": "f", "%5C": "a", "%5E": "c"}
return self.RegexReplaceDictionary(code, dictionary)
else:
return ""
#=========================================================== youtube.com
elif url.find("youtube.com") > 0:
# idea from http://linux.byexamples.com/archives/302/how-to-wget-flv-from-youtube/
mediaRegex = "var fullscreenUrl = '/[^']+(video_id=[^']+)title="
mediaResults = re.findall(mediaRegex, data, re.DOTALL + re.IGNORECASE)
if len(mediaResults) > 0:
lastPart = mediaResults[-1]
mediaUrl = "%s%s" % ("http://www.youtube.com/get_video.php?", lastPart)
return mediaUrl
else:
return ""
#============================================================== veoh.com
elif url.find("veoh.com") > 0:
#===================================================================
# This will only retrieve the first 25 MB
#===================================================================
# taken from http://www.jeroenwijering.com/?thread=5665#msg27949
#fullPreviewHashPath="http://content.veoh.com/flash/p/6279991/0901638753f53dc6515f7624ae6dd13753308925.flv?ct=9137981c7c86355d639a9291d56c28ada1d5a82d8faabe50"
mediaRegex = 'fullPreviewHashPath="([^"]+)"'
mediaResults = re.findall(mediaRegex, data, re.DOTALL+re.IGNORECASE)
if len(mediaResults) > 0:
return mediaResults[-1]
else:
return ""
# NOT WORKING ANYMORE method 2 taken from http://board.alluc.org/viewtopic.php?pid=433097
#http://www.veoh.com/videos/v1990401qg3bdNEa <-- This is the link to the video, the PermalinkID is the last part starting with "v199..."
#http://www.veoh.com/rest/video/v1990401qg3bdNEa/details <-- This gets you the information page.
#originalHash="94f05afdb2d7d1bcb8289bdfb325c51f73874B1O" <-- This is just an example of what you might see.
#origExtension=".avi" <-- This is the type of the original file that was uploaded.
#Use the template above and you get: http://p-cache.veoh.com/cache/external/94f05afdb2d7d1bcb8289bdfb325c51f73874B1O.avi?v1990401qg3bdNEa
# originalHash="a5c40e07ae0730d1edc6ea99ded7195a79fc2e2b" origExtension=".mp4"
#hashRegex = 'originalHash="([^"]+)"'
#extRegex = 'origExtension="([^"]+)"'
#idRegex = 'permalinkId="([^"]+)"'
#
#hashResults = re.findall(hashRegex, data, re.DOTALL+re.IGNORECASE)
#extResults = re.findall(extRegex, data, re.DOTALL+re.IGNORECASE)
#idResults = re.findall(idRegex, data, re.DOTALL+re.IGNORECASE)
#
#if len(hashResults) > 0 and len(extResults) > 0 and len(idResults) > 0:
# #url = "http://p-cache.veoh.com/cache/external/%s%s?permalinkld=%s" % (hashResults[-1], extResults[-1], idResults[-1])
# url = "http://ex-cache.veoh.com/cache/external/%s%s?permalinkld=%s" % (hashResults[-1], extResults[-1], idResults[-1])
# return url
#else:
# return ""
#==============================================================================
elif url.find("video.google.com") > 0:
# extract the url
mediaRegex = "googleplayer.swf\?&videoUrl=([^ ]+)"
mediaResults = re.findall(mediaRegex, data, re.DOTALL+re.IGNORECASE)
if len(mediaResults) > 0:
mediaUrl = mediaResults[-1]
# replace the entities and return the value
return htmlentityhelper.HtmlEntityHelper.ConvertURLEntities(mediaUrl)
else:
return ""
return "Nothing"
#===========================================================================
def DecodeItemUrl(self, url):
"""
Converts a coded URL to a real one
"""
#========================================================= Megavideo.com
if url.find("megavideo.com") > 0:
#http://www.megavideo.com/v/QS7N924R0b5aac3284b34463fa9e6aeea929bb58.8531586.0
#http://www.megavideo.com/?v=QS7N924R
if url.find("/?") < 1:
# do the url rewrite version
regex = re.compile(r'(http://[^/]+/)v/(.{8}).*', re.VERBOSE)
url = regex.sub(r'\1?v=\2',url)
return url
#=========================================================== youtube.com
elif url.find("youtube.com") > 0:
return url
#============================================================== veoh.com
elif url.find("veoh.com") > 0:
# taken from http://www.jeroenwijering.com/?thread=5665#msg27949
# http://www.veoh.com/videos/v669398tRxFQc4d?cmpTag=featured&rank=0
# http://www.veoh.com/rest/video/v669398tRxFQc4d/details
if url.find("/rest/video") > 0:
return url
regex = re.compile(r'http://www.veoh.com/videos/([^?%/]+)([\w\W]+)')
url = regex.sub(r'http://www.veoh.com/rest/video/\1/details', url)
return url
#==============================================================================
elif url.find("googleplayer.swf") > 0:
#http://video.google.com/googleplayer.swf\?docId=([^"]+)
regex = re.compile('http://video.google.com/googleplayer.swf\?docId=(.+)', re.DOTALL + re.IGNORECASE)
results = regex.findall(url)
if len(results) > 0:
return "http://video.google.com/videoplay?docid=%s" % results[-1]
else:
return ""
#========================================================= We don't know
else:
return "Nothing"
#============================================================================
# Helper functions
#============================================================================
def RegexReplaceDictionary(self, string, dictionary):
"""
take a text and replace words that match a key in a dictionary with
the associated value, return the changed text
"""
rc = re.compile('|'.join(map(re.escape, dictionary)))
def Translate(match):
return dictionary[match.group(0)]
return rc.sub(Translate, string)
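# Usage sketch (hypothetical input):
#   Algorithms().RegexReplaceDictionary("a%07b", {"%07": ":"})   # -> "a:b"
# Keys are regex-escaped and OR-ed into one pattern, so all replacements
# happen in a single pass over the string.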
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/algorithms.py
|
Python
|
gpl-2.0
| 9,251
|
[
"VisIt"
] |
8dd77dc387aab60091ab77681340ae01593c443342b4d1bdbb8c8da72e0ba6c9
|
"""
A module for generic classification purposes.
Functionality includes:
normalize_l2norm: Normalize each row has unit l_2 norm.
normalize_col_scale01: Normalize each feature (column) to scale [0,1].
normalize_row_scale01: Normalize each sample (row) to scale [0,1].
normalize_mean0std1: Normalize each feature to have mean 0 and std 1.
normalize_matrical_samples: Normalize matrical samples.
balance_sample_size: Balance sample size of a data set among classes.
change_class_labels: Change class labels to {0,1,2,3,...,C-1}.
change_class_labels_back: Change class labels from {0,1,2,..,C-1} to C given labels.
change_class_labels_to_given: Change original class labels to a given labels.
merge_class_labels: Merge class labels into several super groups/classes.
take_some_classes: Only take several classes, and remove the rest.
partition_train_valid_test: Partition the whole data into training, validation, and test sets.
kfold_cross_validation: k-fold cross-validation.
reduce_sample_size: Reduce the sample size to 1/times of the original.
take_unique_features: Take unique features and make the change in the data accordingly.
take_unique_features_large: Take unique features and make the change in a big data set accordingly. Write the resulting data into a txt file.
take_common_features: Return common features and their indices.
perform: Compute the classification performance given predicted and actual class labels.
con_mat_to_num: Compute performance given confusion matrix.
save_perform: Save performance to a txt file.
write_feature_weight: Write the weights of the input layer of a DFS model to a file. Only applicable to deep feature selection.
write_feature_weight2: Write the weights of the input layer of a DFS and other information (accuracy, feature subsets) to a file. Only applicable to deep feature selection.
plot_bar_group: Plot grouped bars given a matrix.
plot_3dbar_group: Plot grouped 3d-bars given a matrix.
plot_bar_group_subplots: Plot subplots of the (classification) performance.
plot_box_multi: Plot multiple boxes in a plot according to class information.
Yifeng Li
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: yifeng.li.cn@gmail.com
"""
from __future__ import division
import numpy as np
#from sklearn import cross_validation
import math
import os
import sys
def normalize_l2norm(data,tol=0):
"""
Normalize each row has unit l_2 norm.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
tol: tolerance to avoid errors.
OUTPUTS:
data: numpy 2d array or matrix, normalized data.
Example:
data=[[3,5,7,9],[3.0,2,1.1,8.4],[5.9,9,8,10]]
data=np.array(data)
data_normalized=normalize_l2norm(data)
print data_normalized
"""
data_sqrt=np.sqrt(np.square(data).sum(axis=1))
data_sqrt.shape=(data_sqrt.shape[0],1)
#tol=0#1e-8
data=data/(data_sqrt+tol)
return data
def normalize_col_scale01(data,tol=1e-6,data_min=None,data_max=None,clip=False,clip_min=1e-3,clip_max=1e3):
"""
Normalize each feature (column) to scale [0,1].
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
tol: tolerance to avoid errors.
data_min: numpy 1d array or vector, the minimum values of the columns.
data_max: numpy 1d array or vector, the maximum values of the columns.
OUTPUTS:
data: numpy 2d array or matrix, normalized data.
data_min: numpy 1d array or vector, the minimum values of the columns.
data_max: numpy 1d array or vector, the maximum values of the columns.
Example (illustrative, arbitrary values):
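data=[[3,5,7,9],[3.0,2,1.1,8.4],[5.9,9,8,10]]
data=np.array(data)
data_scaled,data_min,data_max=normalize_col_scale01(data)
print data_scaled # each column now lies in [0,1] (up to tol)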
"""
if clip:
data[data<clip_min]=clip_min
data[data>clip_max]=clip_max
if data_max is None:
data_max=np.max(data,axis=0)
data_max=data_max.reshape((1,data_max.shape[0])) # reshape returns a copy; keep it
if data_min is None:
data_min=np.min(data,axis=0)
data_min=data_min.reshape((1,data_min.shape[0])) # reshape returns a copy; keep it
#tol=0#1e-8
return (data-data_min)/(data_max-data_min+tol),data_min,data_max
def normalize_row_scale01(data,tol=1e-6,data_min=None,data_max=None,clip=False,clip_min=1e-3,clip_max=1e3):
"""
Normalize each sample (row) to scale [0,1].
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
tol: tolerance to avoid errors.
data_min: numpy 1d array or vector, the minimum values of the rows.
data_max: numpy 1d array or vector, the maximum values of the rows.
OUTPUTS:
data: numpy 2d array or matrix, normalized data.
data_min: numpy 1d array or vector, the minimum values of the rows.
data_max: numpy 1d array or vector, the maximum values of the rows.
Example (illustrative, arbitrary values):
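data=[[3,5,7,9],[3.0,2,1.1,8.4],[5.9,9,8,10]]
data=np.array(data)
data_scaled,data_min,data_max=normalize_row_scale01(data)
print data_scaled # each row now lies in [0,1] (up to tol)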
"""
if clip:
data[data<clip_min]=clip_min
data[data>clip_max]=clip_max
if data_max is None:
data_max=np.max(data,axis=1)
data_max.shape=(data_max.shape[0],1)
#if clip:
# data_max[data_max>clip_max]=clip_max
if data_min is None:
data_min=np.min(data,axis=1)
data_min.shape=(data_min.shape[0],1)
#if clip:
# data_min[data_min<clip_min]=clip_min
#tol=1e-6#1e-8
return (data-data_min)/(data_max-data_min+tol),data_min,data_max
def normalize_mean0std1(data,data_mean=None,data_std=None,tol=1e-6):
"""
Normalize each feature (column) to mean 0 and std 1.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
data_mean: numpy 1d array or vector, the given means of the features, useful for normalizing test data.
data_std: numpy 1d array or vector, the given standard deviations of the features, useful for normalizing test data.
tol: tolerance to avoid errors.
OUTPUTS:
data: numpy 2d array or matrix, normalized data.
data_mean: numpy 1d array or vector, the given means of the features, useful for normalizing test data.
data_std: numpy 1d array or vector, the given standard deviations of the features, useful for normalizing test data.
"""
if data_mean is None:
data_mean=np.mean(data,axis=0)
data_mean=data_mean.reshape((1,data_mean.shape[0])) # reshape returns a copy; keep it
if data_std is None:
data_std=np.std(data,axis=0)
data_std=data_std.reshape((1,data_std.shape[0])) # reshape returns a copy; keep it
#tol=0#1e-8
return (data-data_mean)/(data_std+tol),data_mean,data_std
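# Usage sketch (hypothetical arrays): normalize the training set, then reuse
# the returned statistics on the test set so both share one scale.
#   train_n,m,s=normalize_mean0std1(train)
#   test_n,_,_=normalize_mean0std1(test,data_mean=m,data_std=s)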
def normalize_deseq(data,size_factors=None):
# DESEQ normalization
if size_factors is not None:
data_normalized=data/size_factors
return data_normalized,size_factors
num_features,num_samples=data.shape
geometric_means=np.zeros(shape=(num_features,1),dtype=float)
for f in range(num_features):
if np.any(data[f,:]==0):
geometric_means[f,0]=0
else:
geometric_means[f,0]=2**(np.mean(np.log2(data[f,:])))
#geometric_means=self.data_noheader_nofeature.prod(axis=1)**(1.0/self.num_samples) # geometric mean across all samples
print geometric_means
print np.median(geometric_means)
print geometric_means.shape
#geometric_means.shape=(len(geometric_means),1)
#data_div_means=data_noheader_nofeature_copy/geometric_means
#data_div_means=numpy.ma.masked_where(numpy.logical_or(data_div_means==numpy.inf,data_div_means==numpy.nan), data_div_means)
#print data_div_means[1:10,:]
#self.size_factors=numpy.ma.median(data_div_means, axis=0).filled(0) # masked median
# compute size factors
ind_geometric_means=geometric_means[:,0]!=0
print ind_geometric_means.shape
print "Total number of geometric means not equal to zero:{}".format(ind_geometric_means.sum())
size_factors=np.zeros(shape=(1,num_samples),dtype=float)
for s in range(num_samples):
size_factors[0,s]=np.median(data[ind_geometric_means,s]/(geometric_means[ind_geometric_means,0]))
if np.any(size_factors==0):
print "Warning: at least one size factor = 0!"
data_normalized=data/size_factors
return data_normalized,size_factors
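# In brief (as implemented above): for feature f, the geometric mean across
# samples is g_f=2**mean(log2(data[f,:])) (set to 0 if any count is 0); the
# size factor of sample s is the median over features with g_f!=0 of
# data[f,s]/g_f, and counts are divided by their sample's size factor.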
def normalize_matrical_samples(data,num_signal,method="l2norm"):
"""
Normalize matrical samples.
INPUTS:
data: numpy 2d array or matrix, each row is a vectorized sample.
num_signal: scalar, number of features in each sample (this parameter is needed to convert a vectorized sample into a matrix.).
method: string, method to normalize each feature (or signal), can be "l2norm", "mean0std1", "scale01".
OUTPUTS:
change the data in-place.
"""
feat_total=data.shape[1]
feat_each=feat_total//num_signal
for i in range(num_signal):
if method=="l2norm":
data[:,i*feat_each:(i+1)*feat_each]=normalize_l2norm(data[:,i*feat_each:(i+1)*feat_each])
if method=="mean0std1":
data[:,i*feat_each:(i+1)*feat_each],data_mean,data_std=normalize_mean0std1(data[:,i*feat_each:(i+1)*feat_each])
if method=="scale01":
data[:,i*feat_each:(i+1)*feat_each],data_min,data_max=normalize_row_scale01(data[:,i*feat_each:(i+1)*feat_each],tol=1e-6)
def balance_sample_size(data,classes,others=None,min_size_given=None,rng=np.random.RandomState(100)):
"""
Balance sample size of a data set among classes by reducing the large classes.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
classes: numpy 1d array or vector, class labels.
others: numpy 2d array or matrix, extra information of samples if available,
each row should be associated with a row of data.
min_size_given: int, the size of each class wanted.
rng: numpy random state.
OUTPUTS:
data: numpy 2d array or matrix, each row should be a sample, balanced data.
classes: numpy 1d array or vector, balanced class labels.
others: numpy 2d array or matrix, balanced other information.
Example:
data=[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6],[7,7,7]]
data=np.array(data)
classes=np.array(['zz','xx','xx','yy','zz','yy','xx'])
balance_sample_size(data,classes)
"""
u, indices = np.unique(classes,return_inverse=True)
indices=np.asarray(indices)
num_u=len(u)
sample_sizes=[]
# get sample size of each class
for i in xrange(num_u):
sample_size_this=np.sum(indices==i)
sample_sizes.append(sample_size_this)
size_min=np.amin(sample_sizes) # smallest sample size
if min_size_given and size_min>min_size_given:
size_min=min_size_given
indices_all=np.array([],dtype=indices.dtype)
indices_range=np.array(range(len(indices)))
for i in xrange(num_u):
ind_this_num=indices_range[indices==i]
ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=size_min,replace=False)]
indices_all=np.append(indices_all,ind_this_reduced)
# reduce the data
data=data[indices_all]
classes=classes[indices_all]
if np.any(others):
others=others[indices_all]
return data,classes,others
def balance_sample_size_increase(data,classes,others=None,max_size_given=None,rng=np.random.RandomState(100)):
"""
Balance sample size of a data set among classes by increasing the sample size of small classes.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
classes: numpy 1d array or vector, class labels.
others: numpy 2d array or matrix, extra information of samples if available,
each row should be associated with a row of data.
max_size_given: int, the size of each class wanted.
rng: numpy random state.
OUTPUTS:
data: numpy 2d array or matrix, each row should be a sample, balanced data.
classes: numpy 1d array or vector, balanced class labels.
others: numpy 2d array or matrix, balanced other information.
Example:
data=[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6],[7,7,7]]
data=np.array(data)
classes=np.array(['zz','xx','xx','yy','zz','yy','xx'])
balance_sample_size_increase(data,classes)
"""
u, indices = np.unique(classes,return_inverse=True)
indices=np.asarray(indices)
num_u=len(u)
sample_sizes=[]
# get sample size of each class
for i in xrange(num_u):
sample_size_this=np.sum(indices==i)
sample_sizes.append(sample_size_this)
size_max=np.amax(sample_sizes) # largest sample size
if max_size_given and size_max<max_size_given:
size_max=max_size_given
indices_all=np.array([],dtype=indices.dtype)
indices_range=np.array(range(len(indices)))
for i in xrange(num_u):
ind_this_num=indices_range[indices==i]
#replacetf=True if sample_sizes[i]<size_max else False
if sample_sizes[i]>=size_max:
ind_this_increased=ind_this_num[rng.choice(sample_sizes[i],size=size_max,replace=False)]
indices_all=np.append(indices_all,ind_this_increased)
else: # make sure each sample is used at least once
ind_this_increased=ind_this_num
ind_this_increased2=ind_this_num[rng.choice(sample_sizes[i],size=size_max-sample_sizes[i],replace=True)]
indices_all=np.append(indices_all,ind_this_increased)
indices_all=np.append(indices_all,ind_this_increased2)
# increase the data
data=data[indices_all]
classes=classes[indices_all]
if np.any(others):
others=others[indices_all]
return data,classes,others
def summarize_classes(classes):
"""
Print a summary of the classes.
"""
u, indices = np.unique(classes,return_inverse=True)
num_u=len(u)
print "****************************"
print "Number of samples: {0}".format(len(classes))
print "Number of Classes:{0}".format(num_u)
for c in u:
num_c=np.sum(classes==c)
print "Class {0}: {1} Samples".format(c,num_c)
print "****************************"
def sort_classes(data,classes,others=None):
"""
Group the class labels into blocks, in case the class labels are distributed randomly in the vector.
data: numpy array, each row is a sample.
classes: numpy 1d array, the class labels.
"""
indices = np.argsort(classes,kind="mergesort")
#print indices
data=data[indices,:]
classes=classes[indices]
if others is not None:
others=others[indices]
return data,classes,others
def truncate_sample_size(data,classes,others=None,max_size_given=None,rng=np.random.RandomState(100)):
"""
Truncate the sample size of each class to at most max_size_given samples.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
classes: numpy 1d array or vector, class labels.
others: numpy 2d array or matrix, extra information of samples if available,
each row should be associated with a row of data.
max_size_given: int, the maximum number of samples kept in each class.
rng: numpy random state.
OUTPUTS:
data: numpy 2d array or matrix, each row should be a sample, balanced data.
classes: numpy 1d array or vector, balanced class labels.
indices_all: numpy 1d array, the numerical indices of samples selected.
others: numpy 2d array or matrix, balanced other information.
Example:
data=[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6],[7,7,7]]
data=np.array(data)
classes=np.array(['zz','xx','xx','yy','zz','yy','xx'])
truncate_sample_size(data,classes,others=None,max_size_given=50)
"""
u, indices = np.unique(classes,return_inverse=True)
indices=np.asarray(indices)
num_u=len(u)
sample_sizes=[]
# get sample size of each class
for i in xrange(num_u):
sample_size_this=np.sum(indices==i)
sample_sizes.append(sample_size_this)
sample_sizes=np.array(sample_sizes,dtype=int)
size_min=np.amin(sample_sizes) # smallest sample size
size_max=np.amax(sample_sizes) # largest sample size
if size_max<max_size_given:
max_size_given=size_max
sample_sizes[sample_sizes>max_size_given]=max_size_given
indices_all=np.array([],dtype=indices.dtype)
indices_range=np.array(range(len(indices)))
for i in xrange(num_u):
ind_this_num=indices_range[indices==i]
ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]
indices_all=np.append(indices_all,ind_this_reduced)
# reduce the data
data=data[indices_all,:]
classes=classes[indices_all]
if np.any(others):
others=others[indices_all]
return data,classes,indices_all,others
def sampling(data,classes,others=None,portion=0.9,max_size_given=None,rng=np.random.RandomState(100)):
"""
Sample data points for a given portion and upper limit.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
classes: numpy 1d array or vector, class labels.
others: numpy 2d array or matrix, extra information of samples if available,
        each row should be associated with a row of data.
portion: float, portion of data points to be sampled.
max_size_given: int, upper limit of the sampled data points in each class; if None, no limit.
rng: numpy random state.
OUTPUTS:
data: numpy 2d array or matrix, each row should be a sample, balanced data.
classes: numpy 1d array or vector, balanced class labels.
indices_all: numpy 1d array, the numerical indices of samples selected.
others: numpy 2d array or matrix, balanced other information.
Example:
data=[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6],[7,7,7]]
data=np.array(data)
classes=np.array(['zz','xx','xx','yy','zz','yy','xx'])
sampling(data,classes,others=None,portion=0.6,max_size_given=50)
"""
u, indices = np.unique(classes,return_inverse=True)
indices=np.asarray(indices)
num_u=len(u)
sample_sizes=[]
# get sample size of each class
for i in xrange(num_u):
sample_size_this=np.sum(indices==i)
sample_sizes.append(sample_size_this)
sample_sizes=np.array(sample_sizes,dtype=int)
sample_sizes=sample_sizes*portion
sample_sizes=np.array(sample_sizes,dtype=int)
# set a ceiling/limit
if max_size_given is not None:
sample_sizes[sample_sizes>max_size_given]=max_size_given
indices_all=np.array([],dtype=indices.dtype)
indices_range=np.array(range(len(indices)))
# sampling
for i in xrange(num_u):
ind_this_num=indices_range[indices==i]
ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]
indices_all=np.append(indices_all,ind_this_reduced)
# reduce the data
data=data[indices_all,:]
classes=classes[indices_all]
    if others is not None:
others=others[indices_all]
return data,classes,indices_all,others
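# A minimal usage sketch for sampling (illustrative; values made up). portion scales
# each class size (truncated to int) and max_size_given caps it per class.
def _demo_sampling():
    import numpy as np
    data=np.arange(14).reshape(7,2)
    classes=np.array(['zz','xx','xx','yy','zz','yy','xx'])
    data2,classes2,kept,_=sampling(data,classes,portion=0.6,max_size_given=50)
    print classes2 # about 60% of each class, at most 50 samples per class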
def sampling_class_portion(data,classes,others=None,class_portion=None,rng=np.random.RandomState(100)):
"""
    Sample data points in each class so that the class sizes follow given portions among classes.
class_portion: dict, the portion for each class, each value should be at least 1, e.g. class_portion={"class0":5,"class1":1,"class3":2}
"""
u, indices = np.unique(classes,return_inverse=True)
indices=np.asarray(indices)
num_u=len(u)
sample_sizes=dict()
# get sample size of each class
size_min=float("inf")
for i in xrange(num_u):
sample_size_this=np.sum(indices==i)
sample_sizes[u[i]]=sample_size_this
if class_portion[u[i]]==1 and sample_size_this<size_min:
size_min=sample_size_this
print size_min
indices_all=np.array([],dtype=indices.dtype)
indices_range=np.array(range(len(indices)))
# sampling
for i in xrange(num_u):
ind_this_num=indices_range[indices==i]
        replacetf=sample_sizes[u[i]]<(size_min*class_portion[u[i]])
ind_this_reduced=ind_this_num[rng.choice(sample_sizes[u[i]],size=size_min*class_portion[u[i]],replace=replacetf)]
indices_all=np.append(indices_all,ind_this_reduced)
# get the sampled data
data=data[indices_all,:]
classes=classes[indices_all]
    if others is not None:
others=others[indices_all]
return data,classes,indices_all,others
def change_class_labels(classes):
"""
Change class labels to {0,1,2,3,...,C-1}.
INPUTS:
classes: numpy 1d array or vector, the original class labels.
OUTPUTS:
u: numpy 1d array or vector, the unique class labels of the original class labels.
indices: numpy 1d array or vector, the new class labels from {0,1,2,3,...,C-1}.
Example:
classes=['c2','c3','c2','c1','c2','c1','c3','c2']
change_class_labels(classes)
Yifeng Li, in UBC
Aug 22, 2014.
"""
u,indices=np.unique(classes,return_inverse=True)
return u,indices
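# A minimal usage sketch for change_class_labels (illustrative): string labels are
# mapped to {0,...,C-1} in the sorted order of the unique labels.
def _demo_change_class_labels():
    u,y=change_class_labels(['c2','c3','c2','c1'])
    print u # ['c1' 'c2' 'c3']
    print y # [1 2 1 0]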
def change_class_labels_back(classes,given):
"""
Change class labels from {0,1,2,..,C-1} to C given labels.
INPUTS:
classes: numpy 1 d array or vector, the original class labels.
given: list of new labels.
OUTPUTS:
classes_new: numpy 1 d array or vector, changed class labels.
Example:
classes=[1,2,0,0,2,1,1,2]
given=["class0","class1","class2"]
    change_class_labels_back(classes,given)
"""
classes=np.asarray(classes)
classes_new=np.zeros(classes.shape,dtype=object)
for i in range(len(given)):
classes_new[classes==i]=given[i]
return classes_new
def change_class_labels_to_given(classes,given):
"""
Change original class labels to given labels.
INPUTS:
classes: numpy 1 d array or vector, the original class labels.
given: dic, pairs of old and new labels. Or list of new labels.
OUTPUTS:
classes_new: numpy 1 d array or vector, changed class labels.
Example:
classes=[1,2,0,0,2,1,1,2]
given={1:"class1", 2:"class2", 0:"class0"}
# given=["class0","class1","class2"]
change_class_labels_to_given(classes,given)
"""
classes=np.asarray(classes)
classes_new=np.zeros(classes.shape,dtype=object)
for i in given:
classes_new[classes==i]=given[i]
return classes_new
def membership_vector_to_indicator_matrix(z,z_unique=None):
"""
Extend membership vector z to binary indicator matrix Z.
z: list or numpy vector, numerical class labels of samples.
    z_unique: list or numpy vector, the unique class labels, useful for transforming class labels of test samples.
For example:
z=[-1,-1,-1,0,0,0,1,1,2,2,2]
Z=[[1,0,0,0],
[1,0,0,0],
[1,0,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,1,0],
[0,0,0,1],
[0,0,0,1],
[0,0,0,1]]
"""
z=np.array(z,dtype=int)
if z_unique is None:
z_unique=np.unique(z)
M=len(z)
U=len(z_unique)
Z=np.zeros(shape=(M,U),dtype=int)
for m in range(M):
for u in range(U):
if z[m]==z_unique[u]:
Z[m,u]=1
return Z,z_unique
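# A minimal usage sketch for membership_vector_to_indicator_matrix (illustrative):
# each row of Z is the one-hot encoding of the corresponding entry of z.
def _demo_membership_vector_to_indicator_matrix():
    Z,z_unique=membership_vector_to_indicator_matrix([0,0,1,2])
    print Z # 4x3 one-hot indicator matrix
    print z_unique # [0 1 2]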
def merge_class_labels(classes,group):
"""
Merge class labels into several super groups/classes.
INPUTS:
classes: numpy 1 d array or vector, the original class labels.
group: tuple of tuples or lists,
group[i] indicates which original classes to be merged to the i-th super class.
OUTPUTS:
classes_merged: numpy 1 d array or vector, the merged class labels.
If original labels are strings, they are concatenated by "+".
        If original labels are numbers, they are renumbered starting from 0.
Example
classes=[0,3,4,2,1,3,3,2,4,1,1,0,0,1,2,3,4,1]
group=([0],[1,2],[3,4])
merge_class_labels(classes,group)
classes=['c2','c1','c0','c0','c1','c2','c1']
group=(['c0'],['c1','c2'])
merge_class_labels(classes,group)
"""
classes=np.asarray(classes)
if (classes.dtype != int) and (classes.dtype != 'int64') and (classes.dtype != 'int32'):
classes_merged=np.zeros(classes.shape,dtype=object)
for subgroup in group:
subgroup_label='+'.join(subgroup)
for member in subgroup:
classes_merged[classes==member]=subgroup_label
else: # int class labels
classes_merged=np.zeros(classes.shape,dtype=int)
for i in range(len(group)):
subgroup=group[i]
for member in subgroup:
classes_merged[classes==member]=i
return classes_merged
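# A minimal usage sketch for merge_class_labels (illustrative): with integer labels,
# super class i collects the original labels listed in group[i].
def _demo_merge_class_labels():
    import numpy as np
    classes=np.array([0,3,4,2,1,3])
    print merge_class_labels(classes,group=([0],[1,2],[3,4])) # [0 2 2 1 1 2]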
def take_some_classes(data,classes,given,others=None):
"""
    Only take several classes, and remove the rest.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
classes: numpy 1d array or vector, class labels, the original labels.
given: numpy 1d array or vector, indicates which classes to be taken.
others: numpy 1d or 2d array (vector or matrix), others related data, e.g. regions corresponding to the classes.
OUTPUTS:
data: numpy 2d array or matrix, each row is a sample, the taken data.
classes: numpy 1d array or vector, class labels, the taken labels.
others: numpy 1d or 2d array (vector or matrix), the taken "others".
"""
classes=np.asarray(classes)
log_ind=np.zeros(classes.shape,dtype=bool)
for i in range(len(given)):
log_ind[classes==given[i]]=True
classes=classes[log_ind]
data=data[log_ind]
    if others is not None:
others=others[log_ind]
return data,classes,others
def partition_train_valid_test(data, classes,ratio=(1,1,1), rng=np.random.RandomState(1000)):
"""
Partition the whole data into training, validation, and test sets.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
classes: numpy 1d array or vector, class labels, the original labels.
ratio, int tuple or list of length 3, (ratio_of_train_set,ratio_of_valid_set,ratio_test_set).
    rng: numpy random state used to generate the random numbers, e.g. rng=np.random.RandomState(1000).
OUTPUTS:
train_set_x: data of training set.
train_set_y: class labels of training set.
valid_set_x: data of validation set.
valid_set_y: class labels of validation set.
test_set_x: data of test set.
test_set_y: class labels of test set.
Example:
data=np.random.random((20,3))
classes=np.array([0,2,2,2,0,0,1,1,0,0,0,2,2,2,0,0,1,1,0,0],dtype=int)
train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y \
=partition_train_valid_test(data,classes,ratio=(2,1,1))
Yifeng Li, in UBC.
August 22, 2014.
"""
k=sum(ratio) # ratio must be a vector of integers
ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)
sequence=np.arange(len(classes))
train_ind=np.array([],dtype=int)
valid_ind=np.array([],dtype=int)
test_ind=np.array([],dtype=int)
count=0
for ki in range(k):
if count<ratio[0]:
train_ind=np.append(train_ind,sequence[ind==ki])
count=count+1
continue
if count>=ratio[0] and count <ratio[0]+ratio[1]:
valid_ind=np.append(valid_ind,sequence[ind==ki])
count=count+1
continue
if count>=ratio[0]+ratio[1] and ratio[2]>0:
test_ind=np.append(test_ind,sequence[ind==ki])
count=count+1
continue
train_set_x=data[train_ind]
train_set_y=classes[train_ind]
valid_set_x=data[valid_ind]
valid_set_y=classes[valid_ind]
test_set_x=data[test_ind]
test_set_y=classes[test_ind]
return train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y
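# A minimal usage sketch for partition_train_valid_test (adapted from the docstring
# example; illustrative only): ratio=(2,1,1) keeps roughly 2/4, 1/4, 1/4 of the
# samples, stratified per class via the k-fold split below.
def _demo_partition_train_valid_test():
    import numpy as np
    data=np.random.random((20,3))
    classes=np.array([0,2,2,2,0,0,1,1,0,0]*2,dtype=int)
    tr_x,tr_y,va_x,va_y,te_x,te_y=partition_train_valid_test(data,classes,ratio=(2,1,1))
    print len(tr_y),len(va_y),len(te_y) # about 10, 5, 5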
def partition_train_valid_test2(data, classes, others, ratio=(1,1,1), rng=np.random.RandomState(1000)):
"""
Partition the whole data into training, validation, and test sets. The only difference between partition_train_valid_test2 and partition_train_valid_test is that the former can handle other information.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
classes: numpy 1d array or vector, class labels, the original labels.
others: numpy 2d array or matrix, extra information of samples if available,
        each row should be associated with a row of data.
ratio, int tuple or list of length 3, (ratio_of_train_set,ratio_of_valid_set,ratio_test_set).
OUTPUTS:
train_set_x: data of training set.
train_set_y: class labels of training set.
train_set_others.
valid_set_x: data of validation set.
valid_set_y: class labels of validation set.
valid_set_others.
test_set_x: data of test set.
test_set_y: class labels of test set.
test_set_others.
Yifeng Li, in UBC.
August 22, 2014.
"""
k=sum(ratio) # ratio must be a vector of integers
ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)
sequence=np.arange(len(classes))
train_ind=np.array([],dtype=int)
valid_ind=np.array([],dtype=int)
test_ind=np.array([],dtype=int)
count=0
for ki in range(k):
if count<ratio[0]:
train_ind=np.append(train_ind,sequence[ind==ki])
count=count+1
continue
if count>=ratio[0] and count <ratio[0]+ratio[1]:
valid_ind=np.append(valid_ind,sequence[ind==ki])
count=count+1
continue
if count>=ratio[0]+ratio[1] and ratio[2]>0:
test_ind=np.append(test_ind,sequence[ind==ki])
count=count+1
continue
train_set_x=data[train_ind]
train_set_y=classes[train_ind]
if others is not None:
train_set_others=others[train_ind]
else:
train_set_others=None
valid_set_x=data[valid_ind]
valid_set_y=classes[valid_ind]
if others is not None:
valid_set_others=others[valid_ind]
else:
valid_set_others=None
test_set_x=data[test_ind]
test_set_y=classes[test_ind]
if others is not None:
test_set_others=others[test_ind]
else:
test_set_others=None
return train_set_x,train_set_y,train_set_others,valid_set_x,valid_set_y,valid_set_others,test_set_x,test_set_y,test_set_others
def kfold_cross_validation(classes,k,shuffle=True,rng=np.random.RandomState(1000)):
"""
kfold cross-validation.
INPUTS:
    classes: numpy 1d array or vector.
k: scalar, the number of folds.
    shuffle: logical, whether to shuffle the samples of each class before assigning them to folds.
rng: random number generator.
OUTPUTS:
indices_folds: numpy 1d array, the splits. For example if k=3, indices_folds can be [2,2,1,0,1,2,0,1,1,2,0,0].
Yifeng Li, April 02, 2015, in UBC.
"""
num_samples=len(classes)
classes=np.array(classes)
classes_unique=np.unique(classes)
num_classes=len(classes_unique)
indices=np.arange(num_samples)
indices_folds=np.zeros([num_samples],dtype=int)
for cl in classes_unique:
indices_cl=indices[classes==cl]
num_samples_cl=len(indices_cl)
# split this class into k parts
if shuffle:
rng.shuffle(indices_cl) # in-place shuffle
# module and residual
num_samples_each_split=num_samples_cl//k
res=num_samples_cl%k
num_samples_splits=num_samples_each_split*np.ones([k],dtype=int)
if res>0:
for r in np.arange(res):
num_samples_splits[r]=num_samples_splits[r]+1
# for each part, assign 0,1,2,...,k-1
start=0
end=0
for ki in range(k):
start=end
end=end+num_samples_splits[ki]
indices_folds[indices_cl[start:end]]=ki
return indices_folds
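# A minimal usage sketch for kfold_cross_validation (illustrative): the returned
# vector assigns each sample to a fold in {0,...,k-1}, stratified within each class.
def _demo_kfold_cross_validation():
    import numpy as np
    classes=np.array([0,0,0,0,1,1,1,1,1,1])
    folds=kfold_cross_validation(classes,k=2,shuffle=True,rng=np.random.RandomState(0))
    print folds # each class is split as evenly as possible across the 2 folds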
def factor_sizes_to_factor_labels(z,start=-1):
    # z is a tuple e.g. (3,2,3) or a list e.g. [3,2,3]
labels=[]
for i in z:
labels.extend([start]*i)
start=start+1
#print labels
return labels
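# A minimal usage sketch for factor_sizes_to_factor_labels (illustrative): the sizes
# are expanded into runs of consecutive labels starting from `start`.
def _demo_factor_sizes_to_factor_labels():
    print factor_sizes_to_factor_labels((3,2,3),start=-1) # [-1, -1, -1, 0, 0, 1, 1, 1]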
def perform(y,y_predicted,unique_classes):
"""
Compute the classification performance given predicted and actual class labels.
INPUTS:
y: numpy 1d array or vector, the actual class labels.
y_predicted: numpy 1d array or vector, the predicted class labels.
unique_classes: numpy 1d array or vector of length C (# classes), all unique actual class labels.
OUTPUTS:
perf: numpy 1d array or vector of length 2*C+3,
    [sen_0, sen_1, ..., sen_{C-1}, precision_0, ..., precision_{C-1}, accuracy, averaged sensitivity, averaged precision].
    For two classes, perf=[sensitivity, specificity, PPV, NPV, accuracy, averaged sensitivity (also called balanced accuracy, that is 0.5*(sen+spec)), averaged PVs (that is 0.5*(PPV+NPV))].
    For multiple classes, perf=[sen_0, sen_1, ..., sen_{C-1}, precision_0, precision_1, ..., precision_{C-1}, accuracy, averaged sensitivity, averaged precision], that is [class-wise rates, class-wise predictive rates, accuracy, averaged class-wise rate, averaged class-wise predictive rate].
confusion_matrix: numpy 2d array of size C X C, confusion matrix.
Example:
y=[0,0,1,1,1,2,2,2,2]
y_predicted=[0,1,1,1,2,2,2,0,1]
perform(y,y_predicted,[0,1,2])
Yifeng Li, in UBC.
August 23, 2014.
"""
y=np.asarray(y,dtype=int)
y_predicted=np.asarray(y_predicted,dtype=int)
numcl=len(unique_classes)
confusion_matrix=np.zeros((numcl,numcl),dtype=float)
for i in xrange(len(y)):
confusion_matrix[y[i],y_predicted[i]]=confusion_matrix[y[i],y_predicted[i]]+1
perf=np.zeros((2*numcl+3,)) # sensitivity_0,sensitivity_1,...,sensitivity_{C-1}, precision_0,precision_1,...,precision_{C-1}, accuracy, balanced sensitivity, balanced precision
    perf[0:numcl]=confusion_matrix.diagonal()/confusion_matrix.sum(axis=1) # sensitivity and specificity for two classes, (class-wise rates for multi-classes)
perf[numcl:2*numcl]=confusion_matrix.diagonal()/confusion_matrix.sum(axis=0) # PPV and NPV for two classes, (class-wise predictive rates for multi-classes)
perf[2*numcl]=confusion_matrix.diagonal().sum()/confusion_matrix.sum(axis=1).sum() # accuracy
perf[2*numcl+1]=np.mean(perf[0:numcl]) # balanced accuracy for two-classes, average class-wise rate for multi-class
    perf[2*numcl+2]=np.mean(perf[numcl:2*numcl]) # average class-wise predictive rate
return perf,confusion_matrix
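# A minimal usage sketch for perform, using the example from the docstring
# (illustrative only).
def _demo_perform():
    perf,conf=perform([0,0,1,1,1,2,2,2,2],[0,1,1,1,2,2,2,0,1],[0,1,2])
    print conf # 3x3 confusion matrix, rows = actual, columns = predicted
    print perf # class-wise rates, class-wise predictive rates, accuracy, and their averages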
def prc(test_set_y_org,test_set_y_pred_prob,classes_unique,plot_curve=False,filename="./fig_prc.pdf",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):
"""
    Calculate the area under the precision-recall curve, and draw the precision-recall curve.
"""
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from scipy import interp
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
precision = dict()
recall = dict()
average_precision = dict()
n_classes=len(classes_unique)
test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)
for c in range(n_classes):
precision[c], recall[c], _ = precision_recall_curve(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])
average_precision[c] = average_precision_score(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])
    # Compute macro-average PRC curve and AUPRC
# First aggregate all recalls
all_recall = np.unique(np.concatenate([recall[c] for c in range(n_classes)]))
#all_recall = np.sort(np.concatenate([recall[c] for c in range(n_classes)]))
    # Then interpolate all PRC curves at these points
mean_precision = np.zeros_like(all_recall)
for c in range(n_classes):
mean_precision = mean_precision + np.interp(all_recall, recall[c][::-1], precision[c][::-1]) # xp in interp() must be in increasing order
# Finally average it and compute AUPRC
mean_precision = mean_precision/n_classes
recall["macro"] = all_recall
precision["macro"] = mean_precision
#roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    # Compute micro-average PRC curve and average precisions
precision["micro"], recall["micro"], _ = precision_recall_curve(test_set_Y_org.ravel(), test_set_y_pred_prob.ravel())
average_precision["macro"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average="macro") # micro macro, weighted, or samples
average_precision["micro"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average="micro") # micro macro, weighted, or samples
average_precision["weighted"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average="weighted") # micro macro, weighted, or samples
average_precision["samples"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average="samples") # micro macro, weighted, or samples
if plot_curve:
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
if n_classes>2 or positive_class_for_two_classes is None:
ax.plot(recall["macro"], precision["macro"], linewidth=1,color=colors[n_classes],label='macro-avg PRC (area={0:0.4f})'.format(average_precision["macro"]))
for c in range(n_classes):
            if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):
ax.plot(recall[c], precision[c],linewidth=1,color=colors[c],label='PRC of {0} (area={1:0.4f})'.format(classes_unique[c], average_precision[c]))
# add some text for labels, title and axes ticks
ax.set_ylim(0.0,1.0)
ax.set_xlim(0.0,1.0)
ax.set_ylabel("Precision",fontsize=12)
ax.set_xlabel("Recall",fontsize=12)
#ax.set_title("",fontsize=15)
ax.legend(loc="lower left",fontsize=8)
        #plt.subplots_adjust(bottom=0.12) # maybe this is not working because of the following setting
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
average_precision_list=[average_precision[c] for c in range(n_classes)]
average_precision_list.extend([average_precision["macro"],average_precision["micro"],average_precision["weighted"],average_precision["samples"]])
average_precision=np.array(average_precision_list)
names=["AUPRC_" + c for c in classes_unique]
names.extend(["macro","micro","weighted","samples"])
names=np.array(names)
return average_precision,names
def roc(test_set_y_org,test_set_y_pred_prob,classes_unique,plot_curve=False,filename="./fig_roc.pdf",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):
"""
    Calculate the area under the ROC curve, and draw the ROC curve.
"""
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from scipy import interp
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
fpr = dict()
tpr = dict()
roc_auc = dict()
n_classes=len(classes_unique)
test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)
for c in range(n_classes):
fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])
roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])
# Compute macro-average ROC curve and AUROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))
    # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for c in range(n_classes):
mean_tpr += interp(all_fpr, fpr[c], tpr[c])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
#roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    # Compute micro-average ROC curve and AUROCs
fpr["micro"], tpr["micro"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob.ravel())
roc_auc["macro"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average="macro") # micro macro, weighted, or samples
roc_auc["micro"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob,average="micro") # micro macro, weighted, or samples
roc_auc["weighted"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average="weighted") # micro macro, weighted, or samples
roc_auc["samples"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average="samples") # micro macro, weighted, or samples
if plot_curve:
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
ax.plot([0, 1], [0, 1], 'k--')
if n_classes>2 or positive_class_for_two_classes is None:
ax.plot(fpr["macro"], tpr["macro"], linewidth=1,color=colors[n_classes],label='macro-avg ROC (area={0:0.4f})'.format(roc_auc["macro"]))
for c in range(n_classes):
if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):
ax.plot(fpr[c], tpr[c],linewidth=1,color=colors[c],label='ROC of {0} (area={1:0.4f})'.format(classes_unique[c], roc_auc[c]))
# add some text for labels, title and axes ticks
ax.set_ylim(0.0,1.0)
ax.set_xlim(0.0,1.0)
ax.set_ylabel("True Positive Rate",fontsize=12)
ax.set_xlabel("False Positive Rate",fontsize=12)
#ax.set_title("",fontsize=15)
ax.legend(loc="lower right",fontsize=8)
        #plt.subplots_adjust(bottom=0.12) # maybe this is not working because of the following setting
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
roc_auc_list=[roc_auc[c] for c in range(n_classes)]
roc_auc_list.extend([roc_auc["macro"],roc_auc["micro"],roc_auc["weighted"],roc_auc["samples"]])
roc_auc=np.array(roc_auc_list)
names=["AUROC_" + c for c in classes_unique]
names.extend(["macro","micro","weighted","samples"])
names=np.array(names)
return roc_auc,names
def prcs(test_set_y_org,test_set_y_pred_prob,methods,linestyles,classes_unique,plot_curve=False,filename="./fig_prc.pdf",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):
"""
    Calculate the areas under the precision-recall curves of multiple methods, and draw the curves in one figure.
INPUTS:
test_set_y_pred_prob: list of numpy 1d arrays.
methods: list of strings, the classification methods used.
classes_unique: list or numpy 1d array of strings, the unique class labels.
linestyles: list of strings, including "solid","dashed", "dashdot", "dotted", each style corresponds to a method.
"""
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from scipy import interp
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
n_classes=len(classes_unique)
test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)
num_methods=len(methods)
average_precisions=[0]*num_methods
names=[None]*num_methods
for m in range(num_methods):
precision = dict()
recall = dict()
average_precision = dict()
for c in range(n_classes):
precision[c], recall[c], _ = precision_recall_curve(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])
average_precision[c] = average_precision_score(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])
        # Compute macro-average PRC curve and AUPRC
# First aggregate all recalls
all_recall = np.unique(np.concatenate([recall[c] for c in range(n_classes)]))
#all_recall = np.sort(np.concatenate([recall[c] for c in range(n_classes)]))
        # Then interpolate all PRC curves at these points
mean_precision = np.zeros_like(all_recall)
for c in range(n_classes):
mean_precision = mean_precision + np.interp(all_recall, recall[c][::-1], precision[c][::-1]) # xp in interp() must be in increasing order
# Finally average it and compute AUPRC
mean_precision = mean_precision/n_classes
recall["macro"] = all_recall
precision["macro"] = mean_precision
#roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
        # Compute micro-average PRC curve and average precisions
precision["micro"], recall["micro"], _ = precision_recall_curve(test_set_Y_org.ravel(), test_set_y_pred_prob[m].ravel())
average_precision["macro"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob[m], average="macro") # micro macro, weighted, or samples
average_precision["micro"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob[m], average="micro") # micro macro, weighted, or samples
average_precision["weighted"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob[m], average="weighted") # micro macro, weighted, or samples
average_precision["samples"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob[m], average="samples") # micro macro, weighted, or samples
if plot_curve:
if m==0:
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
if n_classes>2 or positive_class_for_two_classes is None:
ax.plot(recall["macro"], precision["macro"], linestyle=linestyles[m],linewidth=1,color=colors[n_classes],label='macro-avg PRC (area={0:0.4f}), {1}'.format(average_precision["macro"], methods[m]))
for c in range(n_classes):
                if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):
ax.plot(recall[c], precision[c],linestyle=linestyles[m],linewidth=1,color=colors[c],label='PRC of {0} (area={1:0.4f}), {2}'.format(classes_unique[c], average_precision[c], methods[m]))
# add some text for labels, title and axes ticks
if m==num_methods-1:
ax.set_ylim(0.0,1.0)
ax.set_xlim(0.0,1.0)
ax.set_ylabel("Precision",fontsize=12)
ax.set_xlabel("Recall",fontsize=12)
#ax.set_title("",fontsize=15)
ax.legend(loc="lower left",fontsize=8)
                #plt.subplots_adjust(bottom=0.12) # maybe this is not working because of the following setting
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
average_precision_list=[average_precision[c] for c in range(n_classes)]
average_precision_list.extend([average_precision["macro"],average_precision["micro"],average_precision["weighted"],average_precision["samples"]])
average_precision=np.array(average_precision_list)
name=[methods[m]+"_AUPRC_" + c for c in classes_unique]
name.extend(["macro","micro","weighted","samples"])
name=np.array(name)
average_precisions[m]=average_precision
names[m]=name
return average_precisions,names
def rocs(test_set_y_org,test_set_y_pred_prob,methods,linestyles,classes_unique,plot_curve=False,filename="./fig_roc.pdf",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):
"""
    Calculate the areas under the ROC curves of multiple methods, and draw the curves in one figure.
linestyles: list of strings, including "solid","dashed", "dashdot", "dotted"
"""
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from scipy import interp
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
n_classes=len(classes_unique)
test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)
num_methods=len(methods)
roc_aucs=[0]*num_methods
names=[None]*num_methods
for m in range(num_methods):
fpr = dict()
tpr = dict()
roc_auc = dict()
for c in range(n_classes):
fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])
roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])
# Compute macro-average ROC curve and AUROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))
        # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for c in range(n_classes):
mean_tpr += interp(all_fpr, fpr[c], tpr[c])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
#roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
        # Compute micro-average ROC curve and AUROCs
fpr["micro"], tpr["micro"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob[m].ravel())
roc_auc["macro"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average="macro") # micro macro, weighted, or samples
roc_auc["micro"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m],average="micro") # micro macro, weighted, or samples
roc_auc["weighted"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average="weighted") # micro macro, weighted, or samples
roc_auc["samples"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average="samples") # micro macro, weighted, or samples
if plot_curve:
if m==0:
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
ax.plot([0, 1], [0, 1], 'k--')
if n_classes>2 or positive_class_for_two_classes is None:
ax.plot(fpr["macro"], tpr["macro"], linestyle=linestyles[m],linewidth=1,color=colors[n_classes],label='{0}: macro-avg ROC (area={1:0.4f})'.format(methods[m], roc_auc["macro"]))
for c in range(n_classes):
if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):
ax.plot(fpr[c], tpr[c],linestyle=linestyles[m],linewidth=1,color=colors[c],label='{0}: ROC of {1} (area={2:0.4f})'.format(methods[m], classes_unique[c], roc_auc[c]))
# add some text for labels, title and axes ticks
if m==num_methods-1:
ax.set_ylim(0.0,1.0)
ax.set_xlim(0.0,1.0)
ax.set_ylabel("True Positive Rate",fontsize=12)
ax.set_xlabel("False Positive Rate",fontsize=12)
#ax.set_title("",fontsize=15)
ax.legend(loc="lower right",fontsize=8)
                #plt.subplots_adjust(bottom=0.12) # maybe this is not working because of the following setting
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
roc_auc_list=[roc_auc[c] for c in range(n_classes)]
roc_auc_list.extend([roc_auc["macro"],roc_auc["micro"],roc_auc["weighted"],roc_auc["samples"]])
roc_auc=np.array(roc_auc_list)
name=[methods[m]+"_AUROC_" + c for c in classes_unique]
name.extend(["macro","micro","weighted","samples"])
name=np.array(name)
roc_aucs[m]=roc_auc
names[m]=name
return roc_aucs,names
def con_mat_to_num(confusion_matrix):
"""
Compute performance given confusion matrix.
INPUTS:
confusion_matrix: list or numpy array.
OUTPUTS:
perf: numpy 1d array or vector of length 2*C+3,
    [sen_0, sen_1, ..., sen_{C-1}, precision_0, ..., precision_{C-1}, accuracy, averaged sensitivity, averaged precision].
    For two classes, perf=[sensitivity, specificity, PPV, NPV, accuracy, averaged sensitivity (also called balanced accuracy, that is 0.5*(sen+spec)), averaged PVs (that is 0.5*(PPV+NPV))].
    For multiple classes, perf=[sen_0, sen_1, ..., sen_{C-1}, precision_0, precision_1, ..., precision_{C-1}, accuracy, averaged sensitivity, averaged precision], that is [class-wise rates, class-wise predictive rates, accuracy, averaged class-wise rate, averaged class-wise predictive rate].
"""
    confusion_matrix=np.array(confusion_matrix,dtype=float) # convert to numpy float array (avoids integer division)
numcl=confusion_matrix.shape[0] # number of classes
perf=np.zeros((2*numcl+3,)) # sensitivity_0,sensitivity_1,...,sensitivity_{C-1}, precision_0,precision_1,...,precision_{C-1}, accuracy, balanced sensitivity, balanced precision
    perf[0:numcl]=confusion_matrix.diagonal()/confusion_matrix.sum(axis=1) # sensitivity and specificity for two classes, (class-wise rates for multi-classes)
perf[numcl:2*numcl]=confusion_matrix.diagonal()/confusion_matrix.sum(axis=0) # PPV and NPV for two classes, (class-wise predictive rates for multi-classes)
perf[2*numcl]=confusion_matrix.diagonal().sum()/confusion_matrix.sum(axis=1).sum() # accuracy
perf[2*numcl+1]=np.mean(perf[0:numcl]) # balanced accuracy for two-classes, average class-wise rate for multi-class
    perf[2*numcl+2]=np.mean(perf[numcl:2*numcl]) # average class-wise predictive rate
return perf
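# A minimal usage sketch for con_mat_to_num (illustrative; the counts are made up).
# Float literals are used so the division stays in float even on older numpy/Python 2.
def _demo_con_mat_to_num():
    conf=[[40.,10.],[5.,45.]] # rows = actual, columns = predicted
    print con_mat_to_num(conf) # [sen, spec, PPV, NPV, accuracy, balanced accuracy, averaged PVs]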
def save_all_perfs_aurocs_auprcs(path="./",filename="perfs_aurocs_auprcs.txt",classes_unique=None,num_runs=1,perfs=None,aurocs=None,auprcs=None):
"""
Save the perfs, auROCs, and auPRCs from all runs.
classes_unique: list of strings, the unique class names.
num_runs: integer, number of runs.
perfs: numpy array, each row corresponds to a run.
aurocs: numpy array, each row corresponds to a run.
auprcs: numpy array, each row corresponds to a run.
"""
try:
os.makedirs(path)
except OSError:
pass
num_classes=len(classes_unique)
filename=path + "/" +filename
classes_unique_row=np.array(classes_unique)
classes_unique_row.shape=(1,len(classes_unique))
file_handle=file(filename,'w') # create a new file
file_handle.close()
file_handle=file(filename,'a') # open to append
if perfs is not None:
if num_classes==2:
header_perf=["Sensitivity","Specificity","PPV", "NPV", "Accuracy", "Balanced_Accuracy", "Averaged_PVs"]
else:
header_perf=["Classwise_Rate_" + c for c in classes_unique]
header_perf.extend(["Classwise_Predictive_Rate_" + c for c in classes_unique])
header_perf.extend(["Accuracy","Averaged_Classwise_Rate","Averaged_Classwise_Predictive_Rate"])
if num_runs==1:
            perfs.shape=(1,perfs.size)
else:
header_perf=[]
perfs=np.array([])
perfs.shape=(num_runs,0)
if aurocs is not None:
header_auroc=["AUROC_" + c for c in classes_unique]
header_auroc.extend(["macro","micro","weighted","samples"])
if num_runs==1:
            aurocs.shape=(1,aurocs.size)
else:
header_auroc=[]
aurocs=np.array([])
aurocs.shape=(num_runs,0)
if auprcs is not None:
header_auprc=["AUPRC_" + c for c in classes_unique]
header_auprc.extend(["macro","micro","weighted","samples"])
if num_runs==1:
            auprcs.shape=(1,auprcs.size)
else:
header_auprc=[]
auprcs=np.array([])
auprcs.shape=(num_runs,0)
# save header
header=[]
header.extend(header_perf)
header.extend(header_auroc)
header.extend(header_auprc)
header=np.array(header)
header.shape=(1,len(header))
np.savetxt(file_handle,header,fmt="%s",delimiter='\t')
# save all results
results=np.hstack((perfs,aurocs,auprcs))
np.savetxt(file_handle,results,fmt="%1.4f",delimiter="\t")
file_handle.close()
def save_perform(path="./",filename="mean_performances.txt",create_new_file=True,perf=None,std=None,auroc=None,auroc_std=None,auprc=None,auprc_std=None,conf_mat=None,classes_unique=None,training_time=None,test_time=None,stat_test=None):
"""
Save performance to a txt file.
INPUTS:
path: string, the path information, e.g. path="/home/yifengli/prog/my/DECREAS/result".
filename: string, the name of the txt file to save the performance, e.g. filename="performance.txt".
perf: numpy 1d array, the performance.
std: numpy 1d array, STD.
conf_mat: numpy 2d array, confusion matrix.
classes_unique: numpy 1d array, the unique class labels.
training_time: scalar.
test_time: scalar.
"""
try:
os.makedirs(path)
except OSError:
pass
num_classes=len(classes_unique)
filename=path + "/" +filename
classes_unique_row=np.array(classes_unique)
classes_unique_row.shape=(1,len(classes_unique))
if create_new_file:
file_handle=file(filename,'w') # create a new file
file_handle.close()
file_handle=file(filename,'a') # open to append
np.savetxt(file_handle,classes_unique_row,fmt="%s",delimiter='\t')
if perf is not None:
if num_classes==2:
header=["Sensitivity","Specificity","PPV", "NPV", "Accuracy", "Balanced_Accuracy", "Averaged_PVs"]
else:
header=["Classwise_Rate_" + c for c in classes_unique]
header.extend(["Classwise_Predictive_Rate_" + c for c in classes_unique])
header.extend(["Accuracy","Averaged_Classwise_Rate","Averaged_Classwise_Predictive_Rate"])
header=np.array(header)
header.shape=(1,len(header))
np.savetxt(file_handle,header,fmt="%s",delimiter='\t')
perf=np.array(perf)
perf.shape=(1,len(perf))
np.savetxt(file_handle,perf,fmt="%1.4f",delimiter="\t")
if std is not None:
std=np.array(std)
std.shape=(1,len(std))
np.savetxt(file_handle,std,fmt="%1.4f",delimiter="\t")
if auroc is not None:
np.savetxt(file_handle,["AUROC"],fmt="%s",delimiter="\t")
header=["AUROC_" + c for c in classes_unique]
header.extend(["macro","micro","weighted","samples"])
header=np.array(header)
header.shape=(1,len(header))
np.savetxt(file_handle,header,fmt="%s",delimiter='\t')
auroc=np.array(auroc)
auroc.shape=(1,len(auroc))
np.savetxt(file_handle,auroc,fmt="%1.4f",delimiter="\t")
if auroc_std is not None:
auroc_std=np.array(auroc_std)
auroc_std.shape=(1,len(auroc_std))
np.savetxt(file_handle,auroc_std,fmt="%1.4f",delimiter="\t")
if auprc is not None:
np.savetxt(file_handle,["AUPRC"],fmt="%s",delimiter="\t")
header=["AUPRC_" + c for c in classes_unique]
header.extend(["macro","micro","weighted","samples"])
header=np.array(header)
header.shape=(1,len(header))
np.savetxt(file_handle,header,fmt="%s",delimiter='\t')
auprc=np.array(auprc)
auprc.shape=(1,len(auprc))
np.savetxt(file_handle,auprc,fmt="%1.4f",delimiter="\t")
if auprc_std is not None:
auprc_std=np.array(auprc_std)
auprc_std.shape=(1,len(auprc_std))
np.savetxt(file_handle,auprc_std,fmt="%1.4f",delimiter="\t")
if conf_mat is not None:
np.savetxt(file_handle,["Confusion Matrix"],fmt="%s",delimiter="\t")
np.savetxt(file_handle,classes_unique_row,fmt="%s",delimiter="\t")
np.savetxt(file_handle,conf_mat,fmt="%d",delimiter="\t")
if training_time is not None:
np.savetxt(file_handle,["Training_Time"],fmt="%s",delimiter="\t")
np.savetxt(file_handle,np.array([training_time]),fmt="%1.4e",delimiter="\t")
if test_time is not None:
np.savetxt(file_handle,["Test_Time"],fmt="%s",delimiter="\t")
np.savetxt(file_handle,np.array([test_time]),fmt="%1.4e",delimiter="\t")
if stat_test is not None:
np.savetxt(file_handle,["Statistical_Test"],fmt="%s",delimiter="\t")
np.savetxt(file_handle,np.array([stat_test]),fmt="%1.4e",delimiter="\t")
#if training_time is not None and test_time is not None:
# np.savetxt(file_handle,["Training_Time"],fmt="%s",delimiter="\t")
# np.savetxt(file_handle,np.array([training_time,test_time]),fmt="%1.4e",delimiter="\t")
#if training_time is not None and test_time is None:
# np.savetxt(file_handle,np.array(training_time),fmt="%1.4e",delimiter="\t")
#if training_time is None and test_time is not None:
# np.savetxt(file_handle,np.array(test_time),fmt="%1.4e",delimiter="\t")
#np.savetxt(file_handle,np.array(test_time),fmt="%s",delimiter="\t")
file_handle.close()
def save_perform_old(path,filename,perf=None,std=None,conf_mat=None,classes_unique=None,training_time=None,test_time=None):
"""
Save performance to a txt file.
INPUTS:
path: string, the path information, e.g. path="/home/yifengli/prog/my/DECREAS/result".
filename: string, the name of the txt file to save the performance, e.g. filename="performance.txt".
perf: numpy 1d array, the performance.
std: numpy 1d array, STD.
conf_mat: numpy 2d array, confusion matrix.
classes_unique: numpy 1d array, the unique class labels.
training_time: scalar.
test_time: scalar.
"""
try:
os.makedirs(path)
except OSError:
pass
filename=path + "/" +filename
np.savetxt(filename,classes_unique,fmt="%s",delimiter='\t')
file_handle=file(filename,'a')
if perf is not None:
np.savetxt(file_handle,perf,fmt="%1.4f",delimiter="\t")
if std is not None:
np.savetxt(file_handle,std,fmt="%1.4f",delimiter="\t")
if conf_mat is not None:
np.savetxt(file_handle,conf_mat,fmt="%d",delimiter="\t")
if training_time is not None and test_time is not None:
np.savetxt(file_handle,np.array([training_time,test_time]),fmt="%1.4e",delimiter="\t")
if training_time is not None and test_time is None:
np.savetxt(file_handle,np.array(training_time),fmt="%1.4e",delimiter="\t")
if training_time is None and test_time is not None:
np.savetxt(file_handle,np.array(test_time),fmt="%1.4e",delimiter="\t")
#np.savetxt(file_handle,np.array(test_time),fmt="%s",delimiter="\t")
file_handle.close()
def change_max_num_epoch_change_learning_rate(max_num_epoch_change_learning_rate,max_num_epoch_change_rate):
max_num_epoch_change_learning_rate= int(math.ceil(max_num_epoch_change_rate * max_num_epoch_change_learning_rate))
if max_num_epoch_change_learning_rate<=20:
max_num_epoch_change_learning_rate=20
return max_num_epoch_change_learning_rate
def drange(start, stop, step):
"""
    Generate a sequence of numbers from start to stop with the given step.
"""
values=[]
r = start
while r <= stop:
values.append(r)
r += step
return values
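# A minimal usage sketch for drange (illustrative). The stop value is included when it
# is reached exactly; with steps like 0.1, floating-point accumulation may drop the
# endpoint.
def _demo_drange():
    print drange(0.0,1.0,0.25) # [0.0, 0.25, 0.5, 0.75, 1.0]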
def write_feature_weight(weights,features,lambda1s,filename):
"""
Write the weights of the input layer of a DFS model to a file. Only applicable to deep feature selection.
INPUTS:
weights: numpy 2d array or matrix,
rows corresponding to values of lambda1s,
columns corresponding to features.
features: numpy 1d array or vector, names of features.
lambda1s: numpy 1d array or vector, values of lambda1s.
filename: string, file name to be written.
OUTPUTS:
None.
"""
# example:
#weights=np.asarray([[1.1,2.2,3.4],[5.5,6.6,7.7]])
#features=np.asarray(['f1','f2','f3'],dtype=object)
#lambda1s=np.asarray([1.0,2.0])
#write_feature_weight(weights,features,lambda1s,filename='test.txt')
features=np.insert(features,0,'lambda')
weights=np.asarray(weights,dtype=object)
lambda1s=np.asanyarray(lambda1s,dtype=object)
lambda1s.resize((lambda1s.shape[0],1))
lambda1s_weights=np.hstack((lambda1s,weights))
features.resize((1,features.shape[0]))
features_lambda1s_weights=np.vstack((features,lambda1s_weights))
np.savetxt(filename,features_lambda1s_weights,fmt='%s',delimiter='\t')
def write_feature_weight2(weights=None, features=None, lambda1s=None, accuracy=None, uniqueness=False, tol=1e-4, filename='selected_features.txt',many_features=False):
"""
Write the weights of the input layer of a DFS and other information (accuracy, feature subsets) to a file. Only applicable to deep feature selection.
INPUTS:
weights: numpy 2d array or matrix,
rows corresponding to values of lambda1,
columns corresponding to features.
features: numpy 1d array or vector, name of features.
lambda1s: numpy 1d array or vector, values of lambda1.
accuracy: numpy 1d array or vector, accuracy corresponding to each lambda1.
This parameter is optional.
    uniqueness: bool, indicates if only writing unique sizes of feature subsets.
tol: threshold, weights below tol*w_max are considered to be zeros.
filename: string, file name to be written.
OUTPUTS:
The output file is arranged as [lambda,accuracy,num_selected,feature_subset,weights_of_feature_subset]
"""
weights=np.asarray(weights,dtype=float)
lambda1s=np.asarray(lambda1s,dtype=float)
num_selected=np.zeros(len(lambda1s),dtype=int) # for each lambda, save the number of selected features
features_selected=np.zeros(len(lambda1s),dtype=object)
# get the numbers of selected features
for i in range(len(lambda1s)):
w=weights[i]
w_max=np.max(abs(w))
w_min=np.min(abs(w))
if tol*w_max<=w_min: # there is no element that is much larger: either none selected, or select all
continue
selected=(abs(w)>tol*w_max)
#selected=(abs(w)>tol)
num_selected[i]=selected.sum()
feat_selected=features[selected]
w_selected=w[selected]
ind=np.argsort(abs(w_selected))
ind=ind[::-1]
feat_selected=feat_selected[ind]
features_selected[i]=','.join(feat_selected)
# take the first non-zeros
if uniqueness:
if accuracy is not None:
_,_,take=take_max(num_selected,accuracy)
else:
take=take_first(num_selected)
else:
take=np.ones(len(num_selected),dtype=bool)
weights_take=weights[take]
lambda1s_take=lambda1s[take]
lambda1s_take.resize((lambda1s_take.shape[0],1))
    lambda1s_take=lambda1s_take.round(decimals=6) # round() returns a copy, so reassign to keep the rounding
features_take=features_selected[take]
features_take.resize((features_take.shape[0],1))
num_take=num_selected[take]
# if no subset is selected
if num_take.shape[0]==0:
return None
# if the last one is zero, then it means that all features are selected
if num_take.shape[0]>1 and num_take[-1]==0 and num_take[-2]>0:
num_take[-1]=len(features)
features_take[-1]=','.join(features)
num_take.resize((num_take.shape[0],1))
if accuracy is not None:
accuracy=np.asarray(accuracy,dtype=float)
accuracy_take=accuracy[take]
accuracy_take.resize((accuracy_take.shape[0],1))
        accuracy_take=accuracy_take.round(decimals=4) # round() returns a copy, so reassign to keep the rounding
features=np.insert(features,0,['lambda','accuracy','num_selected','feature_subset'])
features.resize((1,features.shape[0]))
if not many_features:
data=np.hstack((lambda1s_take,accuracy_take, num_take,features_take,weights_take))
data=np.vstack((features,data))
else:
header=np.array(['lambda','accuracy','num_selected'])
header.resize((1,header.shape[0]))
data=np.hstack((lambda1s_take,accuracy_take, num_take))
data=np.vstack((header,data))
else:
if not many_features:
features=np.insert(features,0,['lambda','num_selected','feature_subset'])
features.resize((1,features.shape[0]))
data=np.hstack((lambda1s_take,num_take,features_take,weights_take))
data=np.vstack((features,data))
else:
header=np.array(['lambda','num_selected'])
header.resize((1,header.shape[0]))
data=np.hstack((lambda1s_take, num_take))
data=np.vstack((header,data))
np.savetxt(filename,data,fmt='%s',delimiter='\t')
def take_first(nums):
"""
    Return a boolean mask that selects the first occurrence of each distinct nonzero value.
Yifeng Li in UBC.
Aug 30, 2014.
Example:
nums=[0,0,0,1,2,2,2,3,4,4,5,5,5,5,6,7,7,8]
take_first(nums)
"""
take=np.zeros(len(nums),dtype=bool)
if len(nums)==1:
if nums[0]!=0:
take[0]=True
return take
i=0
while i<len(nums)-1:
if nums[i]==0:
i=i+1
continue
if i==0 and nums[i]==nums[i+1]:
take[i]=True
if i>0 and nums[i-1]==0:
take[i]=True
if i==0 and nums[i] != nums[i+1]:
take[i]=True
take[i+1]=True
if nums[i] != nums[i+1]:
take[i+1]=True
i=i+1
return take
def take_max(num_feat,acc):
num_feat=np.array(num_feat,dtype=int)
acc=np.array(acc,dtype=float)
indices_num=np.arange(len(num_feat))
us=np.unique(num_feat)
num_feat_max=[]
acc_max=[]
indices_num_max=[]
for u in us:
ind=num_feat==u
num_feat_this=num_feat[ind]
acc_this=acc[ind]
indices_num_this=indices_num[ind]
max_ind=np.argmax(acc_this)
num_feat_max.extend([u])
acc_max.extend([acc_this[max_ind]])
indices_num_max.extend([indices_num_this[max_ind]])
return np.array(num_feat_max,dtype=int),np.array(acc_max,dtype=float),np.array(indices_num_max,dtype=int)
def take_max_acc_for_each_feature_size(num_feat,acc,feat_subset):
num_feat=np.array(num_feat,dtype=int)
acc=np.array(acc,dtype=float)
feat_subset=np.array(feat_subset,dtype=object)
us=np.unique(num_feat)
num_feat_max=[]
acc_max=[]
feat_subset_max=[]
for u in us:
ind=num_feat==u
num_feat_this=num_feat[ind]
acc_this=acc[ind]
feat_subset_this=feat_subset[ind]
max_ind=np.argmax(acc_this)
num_feat_max.extend([u])
acc_max.extend([acc_this[max_ind]])
feat_subset_max.extend([feat_subset_this[max_ind]])
return np.array(num_feat_max,dtype=int),np.array(acc_max,dtype=float),np.array(feat_subset_max,dtype=object)
def reduce_sample_size(data,classes,times=2):
"""
    Reduce the sample size to 1/times of the original by keeping every times-th sample.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
classes: numpy 1d array or vector, class labels, the original labels.
times: int.
OUTPUTS:
data: the reduced data.
clases: the reduced classes.
"""
data=data[range(0,data.shape[0],times)]
classes=classes[range(0,classes.shape[0],times)]
return data,classes
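# A minimal usage sketch for reduce_sample_size (illustrative): every times-th sample
# is kept, starting from the first.
def _demo_reduce_sample_size():
    import numpy as np
    data=np.arange(20).reshape(10,2)
    classes=np.arange(10)
    data2,classes2=reduce_sample_size(data,classes,times=2)
    print classes2 # [0 2 4 6 8]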
def take_some_features(data,features,given=None):
"""
Use a subset of given features for vectoral samples.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
features: numpy 1d array for features.
given: numpy 1d array or list for features to be used. given=None will use all features.
OUTPUTS:
data: numpy 2d array or matrix, the data using given features.
features: numpy 1d array, used features.
"""
if given is None:
return data,features
common,ind1,ind2=take_common_features(features,given)
data=data[:,ind1]
features=features[ind1]
return data,features
def exclude_some_features(data,features,given=None):
"""
Exclude some features for vectoral samples.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
features: numpy 1d array for features.
given: numpy 1d array or list for features to be excluded. given=None will use all features.
OUTPUTS:
data: numpy 2d array or matrix, the data excluding given features.
features: numpy 1d array, remaining features.
"""
if given is None:
return data,features
common,ind1,ind2=take_common_features(features,given)
data=np.delete(data,ind1,axis=1)
features=np.delete(features,ind1)
return data,features
def take_some_features_matrical_samples(data,features,given=None):
"""
Use a subset of given features for matrical samples.
"""
num_sample=data.shape[0]
feat_total=data.shape[1]
num_signal=len(features)
feat_each=feat_total//num_signal
if given is None:
return data,features
common,ind1,ind2=take_common_features(features,given)
data=data.reshape((num_sample,num_signal,feat_each))
data=data[:,ind1,:]
features=features[ind1]
data=data.reshape((num_sample,len(features)*feat_each))
return data,features
def exclude_some_features_matrical_samples(data,features,given=None):
"""
Exclude some features for matrical samples.
"""
num_sample=data.shape[0]
feat_total=data.shape[1]
num_signal=len(features)
feat_each=feat_total//num_signal
if given is None:
return data,features
common,ind1,ind2=take_common_features(features,given)
data=data.reshape((num_sample,num_signal,feat_each))
data=np.delete(data,ind1,axis=1)
features=np.delete(features,ind1)
data=data.reshape((num_sample,(len(features))*feat_each))
return data,features
def take_unique_features(data,features,rm_features=None):
"""
    Take unique features and make the change in the data accordingly.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
features: numpy 1d array for features.
rm_features: numpy 1d array of strings or list of strings, the features to be removed.
OUTPUTS:
data: the data with unique sorted features.
features: the unique sorted features.
"""
unik,ind=np.unique(features,return_index=True)
features=unik
# remove unwanted features
if rm_features is not None:
ind_keep=np.array([True]*len(features))
for rmf in rm_features:
ind_keep[features==rmf]=False
features=features[ind_keep]
ind=ind[ind_keep]
data=data[:,ind]
return data,features
def take_unique_features_large(filename_data,filename_features,filename_data_save,filename_features_save,rm_features=None,block_size=1000):
"""
    Take unique features and make the change in a big data set accordingly. Write the resulting data into a txt file.
"""
# read the features from file
features_org=np.loadtxt(filename_features,delimiter='\t',dtype=object)
# create a new file to save processed data
filename_data_save_handle=file(filename_data_save,'w')
filename_data_save_handle.close()
# open the new file to save data sequentially
filename_data_save_handle=file(filename_data_save,'a')
filename_data_handle=open(filename_data,'r')
count=0
start=0
data_block=[]
end_of_file=False
print "Start processing ..."
while not end_of_file:
line=filename_data_handle.readline()
if line=='':
end_of_file=True
else:
if start==0:
data_block=[]
# remove "\n" at the end
data_line=line[0:-1]
# split the string to substrings
data_line=data_line.split('\t')
# append the current line to the block
data_block.append(data_line)
# increase total count
count=count+1
# get a full block or partial block at the end
if start==block_size-1 or (end_of_file and start!=0):
print "processing the %d-th line ..." %count
### process the block ###
data_block=np.array(data_block,dtype=str)
data_block,features=take_unique_features(data_block,features_org,rm_features)
# append to file
np.savetxt(filename_data_save_handle,data_block,fmt='%s',delimiter='\t')
### finished processing the block ###
# reset the counts of lines in the block (0-based)
start=0
else:
start=start+1
filename_data_handle.close()
filename_data_save_handle.close()
print "Done! %d lines are processed." %count
print "The features are:"
print features
# save feature list
np.savetxt(filename_features_save,features,fmt='%s',delimiter='\t')
def take_common_features(feat1,feat2):
"""
Return common features and their indices.
INPUTS:
feat1: numpy 1d array, feature set 1
feat2: numpy 1d array, feature set 2
OUTPUTS:
common: numpy 1d array, the common features.
ind1: numpy 1d array, the indices of the common features in feature set 1.
ind2: numpy 1d array, the indices of the common features in feature set 2.
"""
common=np.intersect1d(feat1,feat2) # sorted
ind1=find_indices(common,feat1)
ind2=find_indices(common,feat2)
return common,ind1,ind2
def find_indices(subset,fullset):
"""
Find the indices of a subset in the fullset. If an element of subset is not in fullset, its index in fullset will be -1.
Example:
# subset=np.array(["f1","f2","f3"])
# fullset=np.array(["f7","f5","f2","f4","f6","f3","f1"])
# indices=find_indices(subset,fullset)
"""
nsub=len(subset)
indices=-np.ones(subset.shape,dtype=int)
indices_full=np.arange(0,len(fullset),1,dtype=int)
    for s in range(0,nsub):
        matched=indices_full[fullset==subset[s]] # numerical indices of matches
        if matched.size>0:
            indices[s]=matched[0] # stays -1 when subset[s] is not in fullset, as documented
return indices
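# A minimal usage sketch for find_indices, using the example from the docstring
# (illustrative only).
def _demo_find_indices():
    import numpy as np
    subset=np.array(["f1","f2","f3"])
    fullset=np.array(["f7","f5","f2","f4","f6","f3","f1"])
    print find_indices(subset,fullset) # [6 2 5]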
def plot_bar(filename, data, std=None, xlab='x', ylab='y', ylim=[0,1],yticks=np.arange(0,1.1,0.1), title='Bar-Plot', methods=None, datasets=None, figwidth=8, figheight=6, colors=None, legend_loc="lower center", xytick_fontsize=12, xylabel_fontsize=15, title_fontsize=15, legend_fontsize=12):
"""
Plot grouped bars given a vector.
data: 1d-array, each element represents the result of a method.
"""
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
data=np.array(data)
num_methods=len(data)
# colors
if colors is None:
colors=['b','r','g','c','m','y','k','w'] # maximally 8 colors allowed so far
ind = np.arange(num_methods) # the x locations of the bars
width = 0.8 # the width of the bars
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
if std is None:
ax.bar(ind,data,width,color=colors[0:num_methods],ecolor='k')
else:
ax.bar(ind,data,width,color=colors[0:num_methods],yerr=std,ecolor='k')
# add some text for labels, title and axes ticks
ax.set_ylabel(ylab,fontsize=xylabel_fontsize)
ax.set_xlabel(xlab,fontsize=xylabel_fontsize)
ax.set_title(title,fontsize=title_fontsize)
ax.set_xticks(ind+0.5*width)
ax.set_xticklabels( methods )
#if ylim is None:
# yticks=np.arange(0,1.1,0.1)
# ylim=[0,1]
if yticks is not None:
ax.set_yticks(yticks)
if ylim is not None:
ax.set_ylim(ylim[0],ylim[1])
plt.setp(ax.get_xticklabels(), fontsize=xytick_fontsize)
plt.setp(ax.get_yticklabels(), fontsize=xytick_fontsize)
# shrink axis box
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=legend_fontsize )
#ax.legend(methods, loc=legend_loc, fontsize=legend_fontsize )
#plt.show()
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
def plot_bar_group(filename, data, std=None, xlab='x', ylab='y', title='Bar-Plot', methods=None, hatchs=None, datasets=None, figwidth=8, figheight=6, colors=None, legend_loc="lower left", xytick_fontsize=12, xylabel_fontsize=15, title_fontsize=15, legend_fontsize=12,ymin=0,ymax=1,rotation=45):
"""
Plot grouped bars given a matrix. Group by datasets.
data: 2d-array, #methods X #datasets, each row represents the result of a method on multiple data sets.
"""
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
data=np.array(data)
num_methods,num_datasets=data.shape
if hatchs is None:
hatchs=[None]*len(methods)
# colors
if colors is None:
colors=['b','r','g','c','m','y','k','w'] # maximally 8 colors allowed so far
ind = np.arange(num_datasets) # the x locations for the groups
width = 0.8*(1.0/num_methods) # the width of the bars
method_bar=[]
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
#fig, ax = plt.subplots()
for i in range(num_methods):
if std is None:
method_bar.append( ax.bar(ind+i*width, data[i,:], width, color=colors[i], ecolor='k', edgecolor='black', linewidth=0.5, hatch=hatchs[i]))
else:
std=np.array(std)
method_bar.append( ax.bar(ind+i*width, data[i,:], width, color=colors[i], yerr=std[i,:], ecolor='k', edgecolor='black', linewidth=0.5, hatch=hatchs[i]))
# add some text for labels, title and axes ticks
ax.set_ylabel(ylab,fontsize=xylabel_fontsize)
ax.set_xlabel(xlab,fontsize=xylabel_fontsize)
ax.set_title(title,fontsize=title_fontsize)
ax.set_xticks(ind+0.5*num_methods*width)
ax.set_xticklabels(datasets, rotation=rotation)
ax.set_yticks(np.arange(0,1.1,0.1))
ax.set_ylim(ymin,ymax)
ax.set_xlim(-0.5,len(datasets)+1)
plt.setp(ax.get_xticklabels(), fontsize=xytick_fontsize)
plt.setp(ax.get_yticklabels(), fontsize=xytick_fontsize)
# shrink axis box
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=legend_fontsize )
ax.legend( method_bar, methods, loc=legend_loc, fontsize=legend_fontsize )
#plt.show()
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
def plot_dot_group(filename, data, std=None, xlab='x', ylab='y', title='Dot-Plot', methods=None, markers=None, datasets=None, figwidth=8, figheight=6, colors=None, legend_loc="lower left", xytick_fontsize=12, xylabel_fontsize=15, title_fontsize=15, legend_fontsize=12,ymin=0,ymax=1,rotation=45):
"""
Plot grouped dots given a matrix. Group by datasets.
data: 2d-array, #methods X #datasets, each row represents the result of a method on multiple data sets.
"""
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
data=np.array(data)
num_methods,num_datasets=data.shape
# colors
if colors is None:
colors=['b','r','g','c','m','y','k','w'] # maximally 8 colors allowed so far, make it robust later
if markers is None:
markers=["s","*","^","+","x","p","d","o","v"] # make it robust later
ind = np.arange(num_datasets) # the x locations for the groups
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
for i in range(num_methods):
ax.plot(ind,data[i,:],color=colors[i],linestyle="", marker=markers[i],markerfacecolor=colors[i],markersize=12)
# add some text for labels, title and axes ticks
ax.set_ylabel(ylab,fontsize=xylabel_fontsize)
ax.set_xlabel(xlab,fontsize=xylabel_fontsize)
ax.set_title(title,fontsize=title_fontsize)
ax.set_xticks(ind)
ax.set_xticklabels(datasets, rotation=rotation)
ax.set_yticks(np.arange(0,1.1,0.05))
ax.set_ylim(ymin,ymax)
ax.set_xlim(ind[0]-0.5,ind[-1]+0.5)
plt.setp(ax.get_xticklabels(), fontsize=xytick_fontsize)
plt.setp(ax.get_yticklabels(), fontsize=xytick_fontsize)
# shrink axis box
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=legend_fontsize )
ax.legend( methods, loc=legend_loc, fontsize=legend_fontsize )
#plt.show()
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
def plot_3dbar_group(filename, data, std=None, xlab='x', ylab='y', zlab='z', title='3D-Bar-Plot', methods=None, datasets=None, figwidth=4, figheight=3, colors=None, legend_loc="lower left", width=0.5, xytick_fontsize=8, xylabel_fontsize=8, title_fontsize=8, legend_fontsize=8):
"""
Plot grouped 3d-bars given a matrix.
data: 2d-array, each row represents the result of a method on multiple data sets.
"""
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d
#print mpl.__version__
#print os.path.abspath(mpl.__file__)
data=np.array(data)
num_methods,num_datasets=data.shape
# colors
if colors is None:
colors=['b','r','g','c','m','y','k','w'] # at most 8 colors are supported so far
fig = plt.figure(figsize=(figwidth,figheight))
ax = fig.add_subplot(111, projection='3d') # new version
#ax = Axes3D(fig) # prior version 1.0.0
for d in range(num_datasets):
x=np.arange(num_methods)
y=data[:,d]
z=d
color=colors[d]
ax.bar(left=x, height=y, zs=z, width=width, zdir='y', color=color, alpha=0.8)
#if std is None:
#ax.bar(left=x, height=y, zs=z, width=0.5, zdir='y', color=color, alpha=0.8)
#else:
#std=np.array(std)
#err=std[:,d]
#ax.bar(left=x, height=y, zs=z, zdir='y', color=color, alpha=0.8, zerr=err, ecolor='k')
# add some text for labels, title and axes ticks
ax.set_ylabel('\n'+ylab, linespacing=3, fontsize=xylabel_fontsize) # labelpad does not work
ax.set_xlabel('\n'+xlab, linespacing=2, fontsize=xylabel_fontsize)
ax.set_zlabel(zlab,fontsize=xylabel_fontsize)
ax.set_title(title,fontsize=title_fontsize)
ax.set_xticks(np.arange(num_methods))
ax.set_xticklabels( methods )
ax.set_zticks(np.arange(0,1.1,0.1))
ax.set_ylim(0,num_datasets)
ax.set_yticks(np.arange(num_datasets)+1)
ax.set_yticklabels( datasets )
ax.set_zlim(0,1)
plt.setp(ax.get_xticklabels(), fontsize=xytick_fontsize, rotation=-15)
plt.setp(ax.get_yticklabels(), fontsize=xytick_fontsize)
plt.setp(ax.get_zticklabels(), fontsize=xytick_fontsize)
#plt.show()
fig.savefig(filename)
plt.close(fig)
def plot_bar_group_subplots(filename, datas, stds=None, xlabs='x', ylabs='y', titles='Bar-Plot', methods=None, datasets=None, figwidth=8, figheight=6, colors=None, legend_loc="lower left", xytick_fontsize=8, xylabel_fontsize=8, title_fontsize=8, legend_fontsize=8, num_col=2, ymin=None, ymax=None):
"""
Plot subplots of (classification) performance.
datas: a list of numpy data matrices, one per subplot.
stds: a list of numpy error matrices, one per subplot; can be None.
xlabs: a list of strings, the labels of the x-axes; can be a single string.
ylabs: a list of strings, the labels of the y-axes; can be a single string.
titles: a list of strings, the titles of the subplots; can be a single string.
methods: a list of lists of strings, the legend entries of each group; can be a single list of strings.
datasets: a list of lists of group names; can be a single list of strings.
figwidth: scalar, width of the figure in inches.
figheight: scalar, height of the figure in inches.
colors: colors.
legend_loc: string, location of the legend.
xytick_fontsize: scalar, the font size of the x and y ticks.
xylabel_fontsize: scalar, the font size of the x and y labels.
legend_fontsize: scalar, the font size of the legend.
num_col: integer, number of columns of subplots.
"""
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
num_plots=len(datas)
num_row=int(math.ceil(num_plots/float(num_col)))
if not isinstance(xlabs,list):
xlabs=[xlabs]*num_plots
if not isinstance(ylabs,list):
ylabs=[ylabs]*num_plots
if not isinstance(titles,list):
titles=[titles]*num_plots
if not isinstance(methods[0],list):
methods=[methods]*num_plots
if not isinstance(datasets[0],list):
datasets=[datasets]*num_plots
# colors
if colors is None:
colors=['b','r','g','c','m','y','k','w'] # at most 8 colors are supported so far
if not isinstance(colors[0],list):
colors=[colors]*num_plots
print(colors) # debug output
fig,ax=plt.subplots(num_row,num_col,sharex='col')
fig.set_size_inches(figwidth,figheight)
for p in range(num_plots):
# obtain data for current subplot
data=datas[p]
data=np.array(data)
if stds is None:
std=None
else:
std=stds[p]
std=np.array(std)
xlab=xlabs[p]
ylab=ylabs[p]
title=titles[p]
print(data) # debug output
num_methods,num_datasets=data.shape
ind = np.arange(num_datasets) # the x locations for the groups
width = 0.8*(1.0/num_methods) # the width of the bars
method_bar=[]
ax_row=p//num_col # integer division: row index of this subplot
ax_col=p%num_col
for i in range(num_methods):
if std is None:
method_bar.append( ax[ax_row,ax_col].bar(ind+i*width, data[i,:], width, color=colors[p][i], ecolor='k'))
else:
method_bar.append( ax[ax_row,ax_col].bar(ind+i*width, data[i,:], width, color=colors[p][i], yerr=std[i,:], ecolor='k'))
# add some text for labels, title and axes ticks
ax[ax_row,ax_col].set_ylabel(ylab,fontsize=xylabel_fontsize)
ax[ax_row,ax_col].set_xlabel(xlab,fontsize=xylabel_fontsize)
ax[ax_row,ax_col].set_title(title,fontsize=title_fontsize)
ax[ax_row,ax_col].set_xticks(ind+0.5*num_methods*width)
ax[ax_row,ax_col].set_xticklabels( datasets[p], rotation=45 )
ax[ax_row,ax_col].set_yticks(np.arange(0,1.1,0.1))
if ymin is not None and ymax is not None:
ax[ax_row,ax_col].set_ylim(ymin[p],ymax[p])
plt.setp(ax[ax_row,ax_col].get_xticklabels(), fontsize=xytick_fontsize)
plt.setp(ax[ax_row,ax_col].get_yticklabels(), fontsize=xytick_fontsize)
# shrink axis box
#box = ax[ax_row,ax_col].get_position()
#ax[ax_row,ax_col].set_position([box.x0, box.y0, box.width * 0.8, box.height])
#ax[ax_row,ax_col].legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=legend_fontsize )
print(methods[p]) # debug output
ax[ax_row,ax_col].legend( method_bar, methods[p], loc='lower center', fontsize=legend_fontsize )
#plt.show()
fig.savefig(filename)
plt.close(fig)
def plot_box_multi(filename, data, classes, classes_unique=None, xlab='x', ylab='y', title='Box-Plot', figwidth=8, figheight=6, ymin=0, ymax=10):
"""
Plot multiple boxes in a plot according to class information.
data: 1d-array.
classes: class information to plot the boxes.
classes_unique: the unique class labels.
"""
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
data=np.array(data)
classes=np.array(classes) # ensure the element-wise comparison below works for list input
if classes_unique is None:
classes_unique=np.unique(classes)
data_plot=[]
for cl in classes_unique:
data_cl=data[classes==cl]
data_plot.append(data_cl)
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
ax.boxplot(data_plot)
# add some text for labels, title and axes ticks
ax.set_ylim(ymin,ymax)
ax.set_ylabel(ylab,fontsize=12)
ax.set_xlabel(xlab,fontsize=12)
ax.set_title(title,fontsize=15)
ind = np.arange(len(classes_unique))
#ax.set_xticks(ind)
ax.set_xticklabels( classes_unique )
plt.setp(ax.get_xticklabels(), fontsize=12, rotation=90)
plt.setp(ax.get_yticklabels(), fontsize=12)
# shrink axis box
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=12 )
#plt.show()
plt.subplots_adjust(bottom=0.12) # maybe this has no effect because bbox_inches='tight' is used below
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
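# Usage sketch for plot_box_multi -- all values below are hypothetical:
# vals = np.concatenate([np.random.randn(50) + 4, np.random.randn(50) + 6])
# cls = np.repeat(np.array(['a', 'b']), 50)
# plot_box_multi("boxes.pdf", vals, cls, classes_unique=np.array(['a', 'b']))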
def feat_acc_fit(feat_nums,accs,feat_subsets=None,tangent=1):
"""
Fit (number_of_selected_features, accuracy) pairs with a saturating curve, and return the fitted parameters plus the number of features and the corresponding accuracy at the point where the curve's slope equals the given tangent.
"""
# fit a saturating curve; a hyperbolic tangent sigmoid attempt is kept commented out below, the active fit uses arctan
from scipy.optimize import curve_fit
#def hyperbolic_tangent_sigmoid(x,k):
# return 2/(1+np.exp(-2*k*x))-1
#popt,pcov=curve_fit(hyperbolic_tangent_sigmoid,feat_nums,accs)
#k=popt[0]
#print popt
## get number of features and corresponding accuracy given a value of tangent
#if k>0 and k<tangent:
# sys.exit()
## denote u=exp(-2kx)
#u_1=2*k-tangent + 2*np.sqrt(k*(k-tangent))
#u_2=2*k-tangent - 2*np.sqrt(k*(k-tangent))
#u=None
#if u_1>0 and u_1<1:
# u=u_1
# print("u_1 fulfils the condition :)")
#if u_2>0 and u_2<1:
# u=u_2
# print("u_2 fulfils the condition :)")
#x_tangent=-np.log(u)/(2*k)
#acc_tangent= hyperbolic_tangent_sigmoid(x_tangent,k)
#x_for_plot=np.linspace(np.min(feat_nums),np.max(feat_nums),1000)
#y_for_plot=hyperbolic_tangent_sigmoid(x_for_plot,k)
def arctan_func(x,k,s):
return 2*s*np.arctan(k*x)/math.pi
popt,pcov=curve_fit(arctan_func,feat_nums,accs)
k=popt[0]
s=popt[1]
print(popt)
# get number of features and corresponding accuracy given a value of tangent
if 2*k*s-tangent*math.pi<=0:
print("error: 2*k*s <= tangent*pi, so no point with the requested tangent exists; exit!")
sys.exit()
x_tangent=(math.sqrt((2*k*s-tangent*math.pi)/(tangent*math.pi)))/k
acc_tangent= arctan_func(x_tangent,k,s)
x_for_plot=np.linspace(np.min(feat_nums),np.max(feat_nums),1000)
y_for_plot=arctan_func(x_for_plot,k,s)
return popt,pcov,x_tangent,acc_tangent,x_for_plot,y_for_plot
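# Usage sketch for feat_acc_fit -- the measurements and the tangent value
# below are hypothetical. Given accuracies at several feature-subset sizes,
# find the point where the fitted arctan curve's slope drops to `tangent`:
# feat_nums = [10, 50, 100, 500, 1000]
# accs = [0.60, 0.75, 0.82, 0.88, 0.90]
# popt, pcov, x_t, acc_t, xs, ys = feat_acc_fit(feat_nums, accs, tangent=1e-4)
# x_t is the suggested number of features; acc_t is the accuracy predicted there.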
def plot_3D_surface(X,Y,Z,dir_save="./",prefix="Parameter1_Parameter_2_Accuracy",figwidth=6,figheight=6,xlab="X",ylab="Y",zlab="Z",xyzlabel_fontsize=8,xyztick_fontsize=8,fmt="pdf",dpi=600):
# X,Y,Z,: 2D numpy array. Z[i,j]=f(X[i,j],Y[i,j])
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cbook
from matplotlib import cm
from matplotlib.colors import LightSource
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(figwidth,figheight))
ax = fig.add_subplot(111, projection='3d') # new version
#fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
ls = LightSource(270, 45)
#rgb = ls.shade(Z, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
#surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=rgb, linewidth=0, antialiased=False, shade=False)
surf=ax.plot_surface(X, Y, Z, cmap='hot', cstride=1, rstride=1,linewidth=0.5)
ax.set_xlabel(xlab,linespacing=3, fontsize=xyzlabel_fontsize)
ax.set_ylabel(ylab,linespacing=3, fontsize=xyzlabel_fontsize)
ax.set_zlabel(zlab,linespacing=3, fontsize=xyzlabel_fontsize)
plt.setp(ax.get_xticklabels(), fontsize=xyztick_fontsize)
plt.setp(ax.get_yticklabels(), fontsize=xyztick_fontsize)
plt.setp(ax.get_zticklabels(), fontsize=xyztick_fontsize)
X_unique=np.unique(X)
ax.set_xticks(X_unique)
ax.set_xticklabels( np.array(X_unique,dtype=str) )
ax.set_xlim(np.min(X_unique),np.max(X_unique))
Y_unique=np.unique(Y)
ax.set_yticks(Y_unique)
ax.set_yticklabels( np.array(Y_unique,dtype=str) )
ax.set_ylim(np.min(Y_unique),np.max(Y_unique))
ax.set_zticks(np.arange(0,1.1,0.1))
ax.set_zlim(0,1)
#plt.show()
filename=dir_save+prefix+"_3d_surface."+fmt
plt.tight_layout()
fig.savefig(filename,format=fmt,dpi=dpi)
plt.close(fig)
def Bernoulli_sampling(P=0.5,size=None,rng=np.random.RandomState(100)):
"""
Bernoulli sampling for RBMs and DBMs.
P: scalar or numpy array, holding the parameter p of the Bernoulli distributions.
size: int or tuple, the shape of the sampled array when P is a scalar; when P is an array, the output has the same shape as P.
rng: random number generator.
"""
if (not np.isscalar(P)):
size=P.shape
if (np.isscalar(P) and size is None):
size=1
S=rng.random_sample(size=size)
return np.array(S<P,dtype=int)
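# Usage sketch for Bernoulli_sampling -- the probabilities below are
# hypothetical. Element-wise draws: each entry is 1 with the corresponding
# probability:
# S = Bernoulli_sampling(P=np.array([[0.1, 0.9], [0.5, 0.5]]))
# Scalar p with an explicit shape:
# S = Bernoulli_sampling(P=0.3, size=(2, 3))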
def Gaussian_sampling(mu=1,beta=1,size=None,rng=np.random.RandomState(100)):
"""
Gaussian sampling for RBMs and DBMs.
mu: scalar or numpy array, holding the means of the Gaussian distributions.
beta: scalar or numpy array, holding the precisions of the Gaussian distributions.
size: int or tuple, the shape of the sampled array when mu is a scalar; when mu is an array, the output has the same shape as mu.
rng: random number generator.
"""
if (not np.isscalar(mu)):
size=mu.shape
if len(size)==1: # vector; size is a shape tuple, so test its length
size=(len(mu),1) # make it 2d
mu.shape=size
X=np.array(mu) # copy, so the caller's mu is not overwritten in place
for i in range(size[0]):
for j in range(size[1]):
X[i,j]=rng.normal(mu[i,j],1/math.sqrt(beta[i]),size=1)
if (np.isscalar(mu) and size is None):
size=1
X=rng.normal(mu,1/math.sqrt(beta),size=size) # beta is a precision, so the standard deviation is 1/sqrt(beta)
return X
def sigmoid(X):
"""
Compute value of sigmoid function.
INPUTS:
X: scalar or numpy array.
OUTPUTS:
Numpy array of sigmoid values.
"""
if isinstance(X,(list,tuple)):
X=np.array(X)
return 1/(1+np.exp(-X))
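# Worked example for sigmoid: sigmoid(0.0) == 0.5, and for a list input the
# result is element-wise:
# sigmoid([-1.0, 0.0, 1.0]) -> array([0.26894142, 0.5, 0.73105858])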
def data_distribution(X,dir_save="./", prefix="X", figwidth=5, figheight=4, color="red", ymax=None, ylab="Number of Features", qs=range(5,105,5),width=0.02):
"""
X: numpy array of binary values, each row is a feature.
"""
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
rowmeanX=np.mean(X,axis=1)
##percentile
#q=range(0,101,5)
#pctl=numpy.percentile(rowmeanX,q)
#print pctl
# plot histogram
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
ax.hist(rowmeanX, bins=100, range=None, normed=False, weights=None, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical',color=color)
ax.set_ylabel(ylab,fontsize=10)
ax.set_xlabel("Frequency in Samples",fontsize=10)
if ymax is not None:
ax.set_ylim(0,ymax)
filename=dir_save + "fig_hist_" + prefix + ".pdf"
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
# plot bar plot
#print "Thresholding..."
#qs=range(5,105,5)
qs=np.array(qs)
qs=qs/100.0 # float division: under Python 2, integer division would make all thresholds 0
counts=[]
for q in qs:
print(q)
cnt=np.sum(rowmeanX>q)
counts.extend([cnt])
fig=plt.figure(num=1,figsize=(figwidth,figheight))
ax=fig.add_subplot(1,1,1)
ax.bar(qs-width/2.0, counts, width=width, bottom=None, color=color)
ax.set_ylabel(ylab + " Above Threshold",fontsize=10)
ax.set_xlabel("Threshold of Frequency in Samples",fontsize=10)
#ax.set_ylim(0,20000)
filename=dir_save + "fig_hist_" + prefix + "_threshold.pdf"
fig.savefig(filename,bbox_inches='tight')
plt.close(fig)
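# Usage sketch for data_distribution -- X below is hypothetical. For a binary
# matrix with one feature per row, this saves a histogram of per-feature
# frequencies plus a bar plot of feature counts above each threshold:
# X = np.random.randint(0, 2, size=(200, 50))
# data_distribution(X, dir_save="./", prefix="demo")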
|
yifeng-li/DECRES
|
classification.py
|
Python
|
bsd-3-clause
| 98,929
|
[
"Gaussian"
] |
59d6d557a004309735616532d400e56b9793ffdb6b7b9adc9d1537ae064c37a8
|
import os
import sys
from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
from pbox_description import pbox_description
from subprocess import Popen, PIPE
from os_utils import check_file_exists, download_file
from vina_utils import get_value_from_box_center, get_value_from_box_size
from json_utils import create_json_file
try:
import configparser
except ImportError:
import ConfigParser as configparser
def load_pbox_file(file_of_pdbid_list):
list_ret = []
f_file = open(file_of_pdbid_list, "r")
for line in f_file:
split_line = str(line).split()
pdb_id = str(split_line[0]).strip()
obj = pbox_description(pdb_id)
list_ret.append(obj)
return list_ret
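# Input format note: each line of the id list is expected to start with a PDB
# id; only the first whitespace-separated token is used. A hypothetical line
# such as
#   1ABC some optional comment
# yields pbox_description('1ABC').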
if __name__ == '__main__':
sc = SparkContext()
sqlCtx = SQLContext(sc)
config = configparser.ConfigParser()
config.read('config.ini')
# Vina configuration for broadcast
vina_program = config.get('VINA', 'vina_program')
pythonsh = config.get('VINA', 'pythonsh')
script_receptor4 = config.get('VINA', 'script_receptor4')
script_ligand4 = config.get('VINA', 'script_ligand4')
script_pdbqt_to_pdb = config.get('VINA', 'script_pdbqt_to_pdb')
exhaustiveness = config.get('VINA', 'exhaustiveness')
path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
gromacs_path = config.get('DRUGDESIGN', 'gromacs_path')
# Adding Python Source file
sc.addPyFile(os.path.join(path_spark_drugdesign, "pbox_description.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign, "os_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign, "vina_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign, "json_utils.py"))
# Broadcast
vina_program = sc.broadcast(vina_program)
pythonsh = sc.broadcast(pythonsh)
script_receptor4 = sc.broadcast(script_receptor4)
script_ligand4 = sc.broadcast(script_ligand4)
script_pdbqt_to_pdb = sc.broadcast(script_pdbqt_to_pdb)
exhaustiveness = sc.broadcast(exhaustiveness)
gromacs_path = sc.broadcast(gromacs_path)
file_of_pdbid_list = sys.argv[1]
check_file_exists(file_of_pdbid_list)
def run_prepare_box(prepare_box_obj):
# Downloads the receptor pdb file
file_name = prepare_box_obj.get_pdb_id() + '.pdb'
url = 'http://www.rcsb.org/pdb/files/' + file_name
download_file(url, file_name)
# Removes waters, ligands and metals with ADT
command = ''.join([pythonsh.value,
' ',
script_receptor4.value,
' -r ',
prepare_box_obj.get_pdb_id(),
'.pdb',
' -o ',
'temporary_',
prepare_box_obj.get_pdb_id(),
'.pdbqt ',
'-A ',
'none ',
'-U ',
'waters ',
'-e ',
'>/dev/null 2>/dev/null'])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Converts the .pdbqt back to .pdb
command = ''.join([pythonsh.value,
' ',
script_pdbqt_to_pdb.value,
' -f ',
'temporary_',
prepare_box_obj.get_pdb_id(),
'.pdbqt',
' -o ',
prepare_box_obj.get_pdb_id(),
'_ok.pdb',
'>/dev/null 2>/dev/null'])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Delete the temporary pdbqt
command = ''.join(['rm temporary_',
prepare_box_obj.get_pdb_id(),
'.pdbqt'])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Uses gromacs to detect the box. note that the receptor is rotated!
# This means that the pdb_id_box.pdb file must be used for visualizations
command = ''.join(['echo System | ',
gromacs_path.value,
'gmx editconf ',
' -f ',
prepare_box_obj.get_pdb_id(),
'_ok.pdb',
' -o ',
prepare_box_obj.get_pdb_id(),
'_box.pdb',
' -d ',
' 0.5 ',
' -bt ',
' triclinic ',
' -angles ',
' 90 90 90 ',
' -princ ',
' -c ',
' 2>/dev/null '
"| grep 'new center' ",
"| awk '{print $4,$5,$6}' "])
proc = Popen(command, shell=True, stdout=PIPE)
box_center, _ = proc.communicate()
box_center = box_center.decode("utf-8")
box_center = get_value_from_box_center(box_center)
command = ''.join(['rm ',
prepare_box_obj.get_pdb_id(),
'_box.pdb'])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = ''.join(["echo 'System' | ",
gromacs_path.value,
'gmx editconf ',
' -f ',
prepare_box_obj.get_pdb_id(),
'_ok.pdb',
' -o ',
prepare_box_obj.get_pdb_id(),
'_box.pdb',
' -d ',
' 0.5 ',
' -bt ',
' triclinic ',
' -angles ',
' 90 90 90 ',
' -princ ',
' -c ',
'2>/dev/null',
"| grep 'new box vectors' ",
"| awk '{print $5,$6,$7}' "])
proc = Popen(command, shell=True, stdout=PIPE)
box_size, _ = proc.communicate()
box_size = box_size.decode("utf-8")
box_size = get_value_from_box_size(box_size)
# Deletes the temporary .pdb
command = ''.join(['rm ',
prepare_box_obj.get_pdb_id(),
'.pdb'])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = ''.join(['rm ',
prepare_box_obj.get_pdb_id(),
'_box.pdb'])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = ''.join(['rm ',
prepare_box_obj.get_pdb_id(),
'_ok.pdb'])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Creating Json output
data = dict(box_center, **box_size)
output_file_name = prepare_box_obj.get_pdb_id() + '_box.json'
create_json_file(output_file_name, data)
list_obj_prepare_box = load_pbox_file(file_of_pdbid_list)
sing_dockRDD = sc.parallelize(list_obj_prepare_box)
sing_dockRDD.foreach(run_prepare_box)
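# Invocation sketch -- the list filename below is hypothetical:
#   spark-submit prepare_box.py pdb_id_list.txt
# The script expects a config.ini in the working directory with [VINA] and
# [DRUGDESIGN] sections providing the paths read above.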
|
rodrigofaccioli/drugdesign
|
virtualscreening/vina/spark/prepare_box.py
|
Python
|
apache-2.0
| 7,824
|
[
"Gromacs"
] |
27c6d7a8092d267a013a4f8ae11f4ad182e4841e2d496da1a18a70b26d998cc4
|
from urlparse import urlparse
import tarfile
import pytest
import os
from ..automation import TaskManager
import utilities
TEST_SITES = [
'http://google.com',
'http://facebook.com',
'http://youtube.com',
'http://yahoo.com',
'http://baidu.com',
'http://wikipedia.org',
'http://qq.com',
'http://linkedin.com',
'http://taobao.com',
'http://twitter.com',
'http://live.com',
'http://amazon.com',
'http://sina.com.cn',
'http://google.co.in',
'http://hao123.com',
'http://blogspot.com',
'http://weibo.com',
'http://wordpress.com',
'http://yandex.ru',
'http://yahoo.co.jp'
]
psl = utilities.get_psl()
class TestCrawl():
""" Runs a short test crawl.
This should be used to test any features that require real
crawl data. This should be avoided if possible, as controlled
tests will be easier to debug
"""
NUM_BROWSERS = 1
def get_config(self, data_dir):
manager_params, browser_params = TaskManager.load_default_params(self.NUM_BROWSERS)
manager_params['data_directory'] = data_dir
manager_params['log_directory'] = data_dir
manager_params['db'] = os.path.join(manager_params['data_directory'],
manager_params['database_name'])
browser_params[0]['profile_archive_dir'] = os.path.join(data_dir, 'browser_profile')
browser_params[0]['headless'] = True
return manager_params, browser_params
@pytest.mark.slow
def test_browser_profile_coverage(self, tmpdir):
""" Test the coverage of the browser's profile
This verifies that Firefox's places.sqlite database contains
all visited sites (with a few exceptions). If it does not,
it is likely the profile is lost at some point during the crawl
"""
# Run the test crawl
data_dir = os.path.join(str(tmpdir), 'data_dir')
manager_params, browser_params = self.get_config(data_dir)
manager = TaskManager.TaskManager(manager_params, browser_params)
for site in TEST_SITES:
manager.get(site)
ff_db_tar = os.path.join(browser_params[0]['profile_archive_dir'],
'profile.tar.gz')
manager.close(post_process=False)
# Extract crawl profile
with tarfile.open(ff_db_tar) as tar:
tar.extractall(browser_params[0]['profile_archive_dir'])
# Output databases
ff_db = os.path.join(browser_params[0]['profile_archive_dir'],
'places.sqlite')
crawl_db = manager_params['db']
# Grab urls from crawl database
rows = utilities.query_db(crawl_db, "SELECT url FROM http_requests")
req_ps = set() # visited domains from http_requests table
for url, in rows:
req_ps.add(psl.get_public_suffix(urlparse(url).hostname))
hist_ps = set() # visited domains from CrawlHistory Table
successes = dict()
rows = utilities.query_db(crawl_db, "SELECT arguments, bool_success "
"FROM CrawlHistory WHERE command='GET'")
for url, success in rows:
ps = psl.get_public_suffix(urlparse(url).hostname)
hist_ps.add(ps)
successes[ps] = success
# Grab urls from Firefox database
profile_ps = set() # visited domains from firefox profile
rows = utilities.query_db(ff_db, "SELECT url FROM moz_places")
for host, in rows:
try:
profile_ps.add(psl.get_public_suffix(urlparse(host).hostname))
except AttributeError:
pass
# We expect urls to be in the Firefox profile if:
# 1. We've made requests to it
# 2. The url is a top_url we entered into the address bar
# 3. The url successfully loaded (see: Issue #40)
# 4. The site does not respond to the initial request with a 204 (won't show in FF DB)
missing_urls = req_ps.intersection(hist_ps).difference(profile_ps)
unexpected_missing_urls = set()
for url in missing_urls:
if successes[url] == 0 or successes[url] == -1:
continue
# Get the visit id for the url
rows = utilities.query_db(crawl_db,
"SELECT visit_id FROM site_visits "
"WHERE site_url = ?",
('http://' + url,))
visit_id = rows[0][0] # rows are returned as tuples; unpack the id
rows = utilities.query_db(crawl_db,
"SELECT COUNT(*) FROM http_responses "
"WHERE visit_id = ?",
(visit_id,))
if rows[0][0] > 1: # the COUNT(*) result is a 1-tuple
continue
rows = utilities.query_db(crawl_db,
"SELECT response_status, location FROM "
"http_responses WHERE visit_id = ?",
(visit_id,))
response_status, location = rows[0]
if response_status == 204:
continue
if location == 'http://': # site returned a blank redirect
continue
unexpected_missing_urls.add(url)
assert len(unexpected_missing_urls) == 0
|
Jasonmk47/OpenWPM
|
test/test_crawl.py
|
Python
|
gpl-3.0
| 5,398
|
[
"VisIt"
] |
9cf77d0adc65899d0c7a07ff07354518681a8031e6c1b268b064d855abc65458
|
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from __future__ import with_statement
import __builtin__
import unittest
from splinter.exceptions import DriverNotFoundError
from fake_webapp import EXAMPLE_APP
from test_webdriver_chrome import chrome_installed
from test_webdriver_firefox import firefox_installed
class BrowserTest(unittest.TestCase):
def patch_driver(self, pattern):
self.old_import = __builtin__.__import__
def custom_import(name, *args, **kwargs):
if pattern in name:
return None
return self.old_import(name, *args, **kwargs)
__builtin__.__import__ = custom_import
def unpatch_driver(self, module):
__builtin__.__import__ = self.old_import
reload(module)
def browser_can_change_user_agent(self, webdriver):
from splinter import Browser
browser = Browser(driver_name=webdriver, user_agent="iphone")
browser.visit(EXAMPLE_APP + "useragent")
result = 'iphone' in browser.html
browser.quit()
return result
def test_browser_can_still_be_imported_from_splinters_browser_module(self):
from splinter.browser import Browser
def test_should_work_even_without_zope_testbrowser(self):
self.patch_driver('zope')
from splinter import browser
reload(browser)
self.assertNotIn('zope.testbrowser', browser._DRIVERS)
self.unpatch_driver(browser)
def test_should_raise_an_exception_when_browser_driver_is_not_found(self):
with self.assertRaises(DriverNotFoundError):
from splinter import Browser
Browser('unknown-driver')
@unittest.skipIf(not firefox_installed(), 'firefox is not installed')
def test_firefox_should_be_able_to_change_user_agent(self):
self.assertTrue(self.browser_can_change_user_agent('firefox'))
@unittest.skipIf(not chrome_installed(), 'chrome is not installed')
def test_chrome_should_be_able_to_change_user_agent(self):
self.assertTrue(self.browser_can_change_user_agent('chrome'))
def test_zope_testbrowser_should_be_able_to_change_user_agent(self):
self.assertTrue(self.browser_can_change_user_agent('zope.testbrowser'))
|
bubenkoff/splinter
|
tests/test_browser.py
|
Python
|
bsd-3-clause
| 2,373
|
[
"VisIt"
] |
7d6121e6a407707f7c86c2341c9e0568043ad2690bf5df434d374d4399e281ef
|
# proxy module
from __future__ import absolute_import
from mayavi.tools.modules import *
|
enthought/etsproxy
|
enthought/mayavi/tools/modules.py
|
Python
|
bsd-3-clause
| 89
|
[
"Mayavi"
] |
ca21a3dcb224e6b53d1574674db0040ef3b741ad2df90e3e905f1048b0478da5
|
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss (jonasgliss@gmail.com)
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""example script 001 - Save plots of all SO2 CD images of test dataset.
This script prepares an AA image list for calibration (using DOAS calib
data) and loops over the list to save all SO2 CD images
"""
from __future__ import (absolute_import, division)
from os.path import join, exists
from os import mkdir
import sys
import pyplis
# Imports from other example scripts
sys.path.append(join("..")) # make sibling example scripts importable before the import below
from ex04_prep_aa_imglist import prepare_aa_image_list
from matplotlib import rcParams
rcParams.update({'font.size': 15})
# Plot settings
CDMIN = -1.8e18
CDMAX = 1.8e18
DPI = 150
FORMAT = "png"
# OPTIONS
CROP_IDX = 3
TEST = 0 # save only first image in time series
BLURRING = 1 # Gaussian blurring applied to images
PYRLEVEL = 1 # Scale space level (Gauss pyramid)
# Relevant paths
SAVE_DIR = join(pyplis._LIBDIR, "..", "scripts", "scripts_out", "all_cd_imgs")
if not exists(SAVE_DIR):
mkdir(SAVE_DIR)
CALIB_FILE = join("..", "scripts_out",
"pyplis_doascalib_id_aa_avg_20150916_0706_0721.fts")
if not exists(CALIB_FILE):
raise IOError("path to CALIB_FILE not found %s" % CALIB_FILE)
CORR_MASK_FILE = join("..", "scripts_out", "aa_corr_mask.fts")
if not exists(CORR_MASK_FILE):
raise IOError("path to CORR_MASK_FILE not found %s" % CORR_MASK_FILE)
# SCRIPT MAIN FUNCTION
if __name__ == "__main__":
from matplotlib.pyplot import close
# Load AA list
aa_list = prepare_aa_image_list()
aa_list.add_gaussian_blurring(BLURRING)
aa_list.pyrlevel = PYRLEVEL
# Load DOAS calibration data and FOV information (see example 6)
doascalib = pyplis.doascalib.DoasCalibData()
doascalib.load_from_fits(file_path=CALIB_FILE)
doascalib.fit_calib_polynomial()
# Load AA corr mask and set it in the image list (it is normalised to the
# DOAS FOV, see ex7)
aa_corr_mask = pyplis.Img(CORR_MASK_FILE)
aa_list.aa_corr_mask = aa_corr_mask
aa_list.sensitivity_corr_mode = True
# set DOAS calibration data in image list
aa_list.calib_data = doascalib
aa_list.calib_mode = True
aa_list.goto_img(CROP_IDX)
ax = aa_list.current_img().show(zlabel=r"$S_{SO2}$ [cm$^{-2}$]",
vmin=CDMIN, vmax=CDMAX)
if TEST:
ax.set_title(aa_list.current_time_str(), fontsize=18)
ax.figure.savefig(join(SAVE_DIR, "TEST.%s" % FORMAT), format=FORMAT,
dpi=DPI)
else:
for k in range(1, aa_list.nof - 2 * CROP_IDX):
ax.set_title(aa_list.current_time_str(), fontsize=18)
ax.figure.savefig(join(SAVE_DIR, "%d.%s" % (k, FORMAT)),
format=FORMAT, dpi=DPI)
close("all")
aa_list.next_img()
ax = aa_list.current_img().show(zlabel=r"$S_{SO2}$ [cm$^{-2}$]",
vmin=CDMIN, vmax=CDMAX)
|
jgliss/pyplis
|
scripts/more_scripts/ex001_save_all_calib_imgs.py
|
Python
|
gpl-3.0
| 3,637
|
[
"Gaussian"
] |
c83b4966150e6df9de2b572d49c759ffcc4641b45867129644c8a215b9f543ad
|
"""Unfinished code for ZMQ/HTTP bridging. We use WebSockets instead.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import json
import logging
from tornado import web
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class ZMQHandler(web.RequestHandler):
def get_stream(self):
"""Get the ZMQStream for this request."""
raise NotImplementedError('Implement get_stream() in a subclass.')
def _save_method_args(self, *args, **kwargs):
"""Save the args and kwargs to get/post/put/delete for future use.
These arguments are not saved in the request or handler objects, but
are often needed by methods such as get_stream().
"""
self._method_args = args
self._method_kwargs = kwargs
def _handle_msgs(self, msg):
msgs = [msg]
stream = self.get_stream()
stream.on_recv(lambda m: msgs.append(json.loads(m)))
stream.flush()
stream.stop_on_recv()
logging.info("Reply: %r" % msgs)
self.write(json.dumps(msgs))
self.finish()
class ZMQPubHandler(ZMQHandler):
SUPPORTED_METHODS = ("POST",)
def post(self, *args, **kwargs):
self._save_method_args(*args, **kwargs)
try:
msg = json.loads(self.request.body)
except ValueError: # request body is not valid JSON
self.send_error(status_code=415)
else:
logging.info("Request: %r" % msg)
self.get_stream().send_json(msg)
class ZMQSubHandler(ZMQHandler):
SUPPORTED_METHODS = ("GET",)
@web.asynchronous
def get(self, *args, **kwargs):
self._save_method_args(*args, **kwargs)
self.get_stream().on_recv(self._handle_msgs)
class ZMQDealerHandler(ZMQHandler):
SUPPORTED_METHODS = ("POST",)
@web.asynchronous
def post(self, *args, **kwargs):
self._save_method_args(*args, **kwargs)
logging.info("request: %r" % self.request)
try:
msg = json.loads(self.request.body)
except ValueError: # request body is not valid JSON
self.send_error(status_code=415)
else:
logging.info("Reply: %r" % msg)
stream = self.get_stream()
stream.send_json(msg)
stream.on_recv(self._handle_msgs)
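# Wiring sketch -- the names below are hypothetical. A concrete handler must
# subclass one of the above and supply the stream:
# class MyPubHandler(ZMQPubHandler):
#     def get_stream(self):
#         # e.g. a zmq.eventloop.zmqstream.ZMQStream stored on the application
#         return self.application.pub_stream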
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/zmqhttp.py
|
Python
|
lgpl-3.0
| 2,824
|
[
"Brian"
] |
71b1c6ff7ab6dfe06229362a83a7952307ee09bffc98a4f448a1273afceab7e9
|
# coding=utf-8
"""List lesson feature tests."""
import pytest
from django.contrib.auth.models import User
from model_mommy import mommy
from pytest_bdd import (
given,
scenario,
then,
when,
)
from quiz.models import Unity, Lesson, Result
@pytest.mark.django_db
@scenario('features/list_lessons.feature', 'Lesson\'s compelted must be shown')
def test_lessons_compelted_must_be_shown():
"""Lesson's compelted must be shown."""
@pytest.mark.django_db
@scenario('features/list_lessons.feature', 'Lesson\'s dificulty must be shown')
def test_lessons_dificulty_must_be_shown():
"""Lesson's dificulty must be shown."""
@pytest.mark.django_db
@scenario('features/list_lessons.feature', 'List lessons available')
def test_list_lessons_available():
"""List lessons available."""
@given('Jack has completed "Lesson 1"')
def jack_has_completed_lesson_1(browser):
"""Jack has completed "Lesson 1"."""
@given('Jack, an logged user in our system')
def jack_an_logged_user_in_our_system(browser):
"""Jack, an logged user in our system."""
User.objects.create_user(username='jack', password='q1w2e3')
browser.visit('http://localhost:8000')
browser.fill('username', 'jack')
browser.fill('password', 'q1w2e3')
browser.find_by_id('submit').first.click()
@given('Lessons available are:')
def lessons_available_are():
"""Lessons available are:."""
@given('| Unity | Lesson | Dificulty |')
def _unity____lesson____dificulty_():
"""| Unity | Lesson | Dificulty |."""
@given('| Unity 1 | Lesson 1 | Easy |')
def _unity_1__lesson_1__easy______():
"""| Unity 1 | Lesson 1 | Easy |."""
unity = mommy.make(Unity, name='Unity 1', level=1)
mommy.make(Lesson, id=78, name='Lesson OR', unity=unity)
assert Unity.objects.filter(name='Unity 1').exists()
@given('| Unity 2 | Lesson 2 | Medium |')
def _unity_2__lesson_2__medium____():
"""| Unity 2 | Lesson 2 | Medium |."""
unity = mommy.make(Unity, name='Unity 2', level=2)
mommy.make(Lesson, name='Lesson AND', unity=unity)
assert Unity.objects.filter(name='Unity 2').exists()
@when('Jack is in the home page')
def jack_is_in_the_home_page(browser):
"""Jack is in the home page."""
assert 'http://localhost:8000/' in browser.url
@then('Jack gets a list of available lessons')
def jack_gets_a_list_of_available_lessons(browser):
"""Jack gets a list of available lessons."""
browser.reload()
assert browser.is_text_present('Level 1 - Unity 1') is True
assert browser.is_text_present('Level 2 - Unity 2') is True
@then('Jack sees that "Lesson 1" is completed')
def jack_sees_that_lesson_1_is_completed(browser):
"""Jack sees that "Lesson 1" is completed."""
mommy.make(Result, lesson=Lesson.objects.get(id=78), user=User.objects.get(username='jack'))
browser.reload()
assert bool(browser.find_by_xpath("//i[@class='fui-check-inverted']")) is True
|
jesuejunior/golingo
|
quiz/tests/functional/test_list_lessons.py
|
Python
|
bsd-3-clause
| 2,945
|
[
"VisIt"
] |
e98ea518268ed515200c4eb59a3afc4055c28008cda60c246b42535902e20d68
|
# A collection of tools to remotely access a CATMAID server via its API
#
# Copyright (C) 2017 Philipp Schlegel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""This module contains functions to request data from Catmaid server.
Examples
--------
>>> import pymaid
>>> # HTTP_USER and HTTP_PASSWORD are only necessary if your server requires
>>> # HTTP authentication
>>> myInstance = pymaid.CatmaidInstance('www.your.catmaid-server.org',
... 'api_token',
... 'http_user', # Omit if not required
... 'http_password')
>>> # Get skeletal data for two neurons
>>> neuron_list = pymaid.get_neuron(['12345', '67890'], myInstance)
>>> neuron_list[0]
type <class 'pymaid.CatmaidNeuron'>
neuron_name Example neuron name
skeleton_id 12345
n_nodes 9924
n_connectors 437
n_branch_nodes 207
n_end_nodes 214
cable_length 1479.81
review_status NA
annotations False
igraph False
tags True
dtype: object
"""
import datetime
import re
import urllib
import webbrowser
import navis as ns
import numpy as np
import networkx as nx
import pandas as pd
from . import core, utils, config, cache
from navis import in_volume
__all__ = sorted(['get_annotation_details', 'get_annotation_id',
'get_annotation_list', 'get_annotations', 'get_arbor',
'get_connector_details', 'get_connectors',
'get_connector_tags',
'get_contributor_statistics', 'get_edges', 'get_history',
'get_logs', 'get_names', 'get_neuron',
'get_neurons', 'get_neurons_in_bbox',
'get_neurons_in_volume', 'get_node_tags', 'get_node_details',
'get_nodes_in_volume', 'get_partners',
'get_partners_in_volume', 'get_paths', 'get_review',
'get_review_details', 'get_skids_by_annotation',
'get_skids_by_name', 'get_node_info',
'get_node_table', 'get_user_annotations',
'get_user_list', 'get_volume', 'has_soma', 'neuron_exists',
'get_segments',
'get_connectors_between', 'url_to_coordinates',
'get_label_list', 'find_neurons',
'get_skid_from_node', 'get_transactions',
'get_connector_links',
'get_nth_partners', 'find_nodes',
'get_node_location', 'get_annotated',
'get_neuron_id',
'get_connectors_in_bbox',
'get_cable_lengths',
'get_connectivity_counts',
'get_import_info',
'get_origin', 'get_skids_by_origin',
'get_sampler', 'get_sampler_domains', 'get_sampler_counts',
'get_skeleton_change'])
# Set up logging
logger = config.logger
@cache.undo_on_error
def get_neuron(x, with_connectors=True, with_tags=True, with_history=False,
with_merge_history=False, with_abutting=False, return_df=False,
fetch_kwargs={}, init_kwargs={}, raise_missing=True,
remote_instance=None):
"""Retrieve 3D skeleton data as CatmaidNeuron/List.
Parameters
----------
x
Can be either:
1. list of skeleton ID(s), int or str
2. list of neuron name(s), str, exact match
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
with_connectors : bool, optional
If True, will include connector data.
Note: the CATMAID API endpoint does currently not
support retrieving abutting connectors this way.
Please use ``with_abutting=True`` to include
abutting connectors.
with_tags : bool, optional
If True, will include node tags.
with_history : bool, optional
If True, the returned node data will contain
creation date and last modified for each
node.
ATTENTION: if ``with_history=True``, nodes/connectors
that have been moved since their creation will have
multiple entries reflecting their changes in position!
Each state has the date it was modified as creation
date and the next state's date as last modified. The
most up to date state has the original creation date
as last modified.
The creator_id is always the original creator though.
with_abutting : bool, optional
If True, will retrieve abutting connectors.
For some reason they are not part of compact-json, so
they have to be retrieved via a separate API endpoint
-> will show up as connector type 3!
return_df : bool, optional
If True, a ``pandas.DataFrame`` instead of
``CatmaidNeuron``/``CatmaidNeuronList`` is returned.
fetch_kwargs : dict, optional
Above BOOLEAN parameters can also be passed as dict.
This is then used in CatmaidNeuron objects to
override implicitly set parameters!
init_kwargs : dict, optional
Keyword arguments passed when initializing
``CatmaidNeuron``/``CatmaidNeuronList``.
raise_missing : bool, optional
If True and any of the queried neurons can not be
found, raise an exception. Else just log a warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
:class:`~pymaid.CatmaidNeuron`
For single neurons.
:class:`~pymaid.CatmaidNeuronList`
For a list of neurons.
pandas.DataFrame
If ``return_df=True``
Notes
-----
The returned objects contain for each neuron::
neuron_name : str
skeleton_id : str
nodes / connectors : pandas.DataFrames containing node/connector
ID, coordinates, parent nodes, etc.
tags : dict containing the node tags:
``{'tag': [node_id, node_id, ...]}``
Dataframe column titles for ``nodes`` and ``connectors`` should be
self-explanatory with the exception of ``relation`` in connector table.
This column describes the connection ("relation") from the neuron's
node TO the connector::
connectors['relation']
0 = "presynaptic_to" -> this is a presynapse for this neuron
1 = "postsynaptic_to" -> this is a postsynapse for this neuron
2 = "gapjunction_with"
3 = "abutting" (not returned by default)
-1 = other (hypothetical as CATMAID does only return the above)
Examples
--------
>>> # Get a single neuron by skeleton id
>>> n = pymaid.get_neuron(16)
>>> # Get a bunch of neurons by annotation
>>> n = pymaid.get_neuron('annotation:glomerulus DA1')
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
# Update from kwargs if available
with_tags = fetch_kwargs.get('with_tags', with_tags)
with_connectors = fetch_kwargs.get('with_connectors', with_connectors)
with_history = fetch_kwargs.get('with_history', with_history)
with_merge_history = fetch_kwargs.get('with_merge_history', with_merge_history)
with_abutting = fetch_kwargs.get('with_abutting', with_abutting)
return_df = fetch_kwargs.get('return_df', return_df)
# Generate URLs to retrieve
urls = [remote_instance._get_compact_details_url(s,
with_history=str(with_history).lower(),
with_tags=str(with_tags).lower(),
with_connectors=str(with_connectors).lower(),
with_merge_history=str(with_merge_history).lower()) for s in x]
skdata = remote_instance.fetch(urls, desc='Fetch neurons')
# Retrieve abutting
if with_abutting:
urls = [remote_instance._get_connector_links_url(**{'skeleton_ids[0]': str(s),
'relation_type': 'abutting'}) for s in x]
cn_data = remote_instance.fetch(urls, desc='Fetch abutting cn')
# Add abutting to other connectors in skdata with type == 3
for i, cn in enumerate(cn_data):
if not with_history:
skdata[i][1] += [[c[7], c[1], 3, c[2], c[3], c[4]]
for c in cn['links']]
else:
skdata[i][1] += [[c[7], c[1], 3, c[2], c[3], c[4], c[8], None]
for c in cn['links']]
# Get neuron names
names = get_names(x, remote_instance=remote_instance)
# Parse column names
node_cols = ['node_id', 'parent_id', 'creator_id', 'x', 'y', 'z',
'radius', 'confidence']
cn_cols = ['node_id', 'connector_id', 'type', 'x', 'y', 'z']
if with_history:
node_cols += ['last_modified', 'creation_date', 'still_on_skeleton']
cn_cols += ['last_modified', 'creation_date']
missing = [s for s, d in zip(x, skdata) if not d[0]]
if missing:
msg = 'The following skeleton ID(s) could not be found: {}'.format(', '.join(missing))
if raise_missing:
raise ValueError(msg)
else:
logger.warning(msg)
# Convert data to appropriate dtypes
node_dtypes = {'node_id': np.int32,
'parent_id': np.int32,
'creator_id': 'category',
'x': np.float32,
'y': np.float32,
'z': np.float32,
'radius': np.float32,
'confidence': 'category'}
cn_dtypes = {'node_id': np.int32,
'type': 'category',
'connector_id': np.int32,
'x': np.float32,
'y': np.float32,
'z': np.float32}
def make_node_table(x):
"""Generate node table (incl. correct data types)."""
df = pd.DataFrame(x, columns=node_cols).fillna(-1) # do not remove fillna
return df.astype(node_dtypes)
def make_cn_table(x):
"""Generate connector table (incl. correct data types)."""
df = pd.DataFrame(x, columns=cn_cols)
return df.astype(cn_dtypes)
# Generate DataFrame containing all neurons
df = pd.DataFrame([[names[str(x[i])], # neuron name
str(x[i]), # skeleton ID
make_node_table(n[0]), # nodes
make_cn_table(n[1]), # connectors
n[2] # tags as dictionary
] for i, n in enumerate(skdata) if n[0]],
columns=['neuron_name', 'skeleton_id',
'nodes', 'connectors', 'tags'])
if return_df:
return df
nl = core.CatmaidNeuronList(df, remote_instance=remote_instance, **init_kwargs)
return nl[0] if len(nl) == 1 and len(x) == 1 else nl
# This is for legacy reasons -> will remove eventually
get_neurons = get_neuron
@cache.undo_on_error
def get_arbor(x, node_flag=1, connector_flag=1, tag_flag=1, remote_instance=None):
"""Retrieve skeleton data for a list of skeleton ids.
Similar to :func:`pymaid.get_neuron` but the connector data includes
the whole chain::
node1 -> (link_confidence) -> connector -> (link_confidence) -> node2
This means that connectors can show up multiple times (i.e. if they have
multiple postsynaptic targets). Does include connector ``x, y, z``
coordinates!
Parameters
----------
x
Neurons to retrieve. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
node_flag : 0 | 1, optional
Set if node data should be retrieved.
connector_flag : 0 | 1, optional
Set if connector data should be retrieved.
tag_flag : 0 | 1, optional
Set if tags should be retrieved.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron::
neuron_name skeleton_id nodes connectors tags
0 str str DataFrame DataFrame dict
1
2
Notes
-----
- nodes and connectors are pandas.DataFrames themselves
- tags is a dict: ``{'tag': [node_id, node_id, ...]}``
Dataframe (df) column titles should be self-explanatory with these exceptions:
- ``df['relation_1']`` describes node1 to/from connector
- ``df['relation_2']`` describes node2 to/from connector
- ``relation`` can be: ``0`` (presynaptic), ``1`` (postsynaptic), ``2`` (gap junction)
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
skdata = []
for s in config.tqdm(x, desc='Retrieving arbors', disable=config.pbar_hide,
leave=config.pbar_leave):
# Create URL for retrieving example skeleton from server
remote_compact_arbor_url = remote_instance._get_compact_arbor_url(
s, node_flag, connector_flag, tag_flag)
# Retrieve node_data for example skeleton
arbor_data = remote_instance.fetch(remote_compact_arbor_url)
skdata.append(arbor_data)
logger.debug('%s retrieved' % str(s))
names = get_names(x, remote_instance)
df = pd.DataFrame([[
names[str(x[i])],
str(x[i]),
pd.DataFrame(n[0], columns=['node_id', 'parent_id', 'creator_id',
'x', 'y', 'z', 'radius', 'confidence']),
pd.DataFrame(n[1], columns=['node_1', 'link_confidence',
'connector_id', 'link_confidence',
'node_2', 'other_skeleton_id',
'relation_1', 'relation_2']),
n[2]]
for i, n in enumerate(skdata)
],
columns=['neuron_name', 'skeleton_id', 'nodes', 'connectors', 'tags'],
dtype=object
)
return df
@cache.undo_on_error
def get_partners_in_volume(x, volume, syn_threshold=None, min_size=2,
remote_instance=None):
"""Retrieve the synaptic/gap junction partners within a CATMAID Volume.
Important
---------
Connectivity (total number of connections) returned is restricted to
that volume.
Parameters
----------
x
Neurons to check. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
volume : str | list of str | navis.Volume
Name of the CATMAID volume to test OR volume dict with
{'vertices':[],'faces':[]} as returned by e.g.
:func:`~pymaid.get_volume()`.
syn_threshold : int, optional
Synapse threshold. This threshold is applied to the
TOTAL number of synapses across all neurons!
min_size : int, optional
Minimum node count of partner
(default = 2 -> hide single-node partner).
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron and the number of
synapses with the query neurons::
neuron_name skeleton_id num_nodes relation skid1 skid2 ...
1 name1 skid1 node_count1 upstream n_syn n_syn ..
2 name2 skid2 node_count2 downstream n_syn n_syn .
3 name3 skid3 node_count3 gapjunction n_syn n_syn .
- Relation can be: upstream (incoming), downstream (outgoing) of the
neurons of interest or gap junction
- partners can show up multiple times if they are e.g. pre- AND
postsynaptic
- the number of connections between two partners is restricted to the
volume
See Also
--------
:func:`~pymaid.get_neurons_in_volume`
Get all neurons within given volume.
:func:`~pymaid.filter_connectivity`
Filter connectivity table or adjacency matrix by volume(s) or to
parts of neuron(s).
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
# First, get list of connectors
cn_data = get_connectors(x, remote_instance=remote_instance)
# Find out which connectors are in the volume of interest
if isinstance(volume, str):
volume = get_volume(volume, remote_instance=remote_instance)
elif isinstance(volume, (list, np.ndarray)):
for i in range(len(volume)):
if isinstance(volume[i], str):
volume[i] = get_volume(volume[i],
remote_instance=remote_instance)
iv = in_volume(cn_data[['x', 'y', 'z']], volume)
# Get the subset of connectors within the volume
cn_in_volume = cn_data[iv].copy()
logger.info('{} unique connectors in volume. Reconstructing connectivity'
'...'.format(len(cn_in_volume.connector_id.unique())))
# Get details for connectors in volume
cn_details = get_connector_details(cn_in_volume.connector_id.unique(),
remote_instance=remote_instance)
# Filter those connectors that don't have a presynaptic node
cn_details = cn_details[~cn_details.presynaptic_to.isnull()]
# Now reconstruct connectivity table from connector details
# Some connectors may be connected to the same neuron multiple times
# In those cases there will be more node IDs in "postsynaptic_to_node"
# than there are skeleton IDs in "postsynaptic_to". Then we need to map
# node IDs to neurons
mismatch = cn_details[cn_details.postsynaptic_to.apply(
len) < cn_details.postsynaptic_to_node.apply(len)]
match = cn_details[cn_details.postsynaptic_to.apply(
len) >= cn_details.postsynaptic_to_node.apply(len)]
if not mismatch.empty:
logger.info('Retrieving additional details for {0} '
'connectors'.format(mismatch.shape[0]))
tn_to_skid = get_skid_from_node([tn for l in mismatch.postsynaptic_to_node.values for tn in l],
remote_instance=remote_instance)
else:
tn_to_skid = []
# Now collect edges
edges = [[cn.presynaptic_to, skid]
for cn in match.itertuples() for skid in cn.postsynaptic_to]
edges += [[cn.presynaptic_to, tn_to_skid[tn]]
for cn in mismatch.itertuples() for tn in cn.postsynaptic_to_node]
# Turn edges into synaptic connections
unique_edges, counts = np.unique(edges, return_counts=True, axis=0)
unique_skids = np.unique(edges).astype(str)
unique_edges = unique_edges.astype(str)
# Create empty adj_mat
adj_mat = pd.DataFrame(np.zeros((len(unique_skids), len(unique_skids))),
columns=unique_skids, index=unique_skids)
for i, e in enumerate(config.tqdm(unique_edges,
disable=config.pbar_hide,
desc='Adj. matrix',
leave=config.pbar_leave)):
# scalar assignment per edge; df.at would be faster than .loc here
adj_mat.loc[str(e[0]), str(e[1])] = counts[i]
# There is a chance that our original neurons haven't made it through
# filtering (i.e. they don't have partners in the volume ). We will simply
# add these rows and columns and set them to 0
missing = [n for n in x if n not in adj_mat.columns]
for n in missing:
adj_mat[n] = 0
missing = [n for n in x if n not in adj_mat.index]
for n in missing:
adj_mat.loc[n] = [0 for i in range(adj_mat.shape[1])]
# Generate connectivity table
all_upstream = adj_mat.T[adj_mat.T[x].sum(axis=1) > 0][x]
all_upstream['skeleton_id'] = all_upstream.index
all_upstream['relation'] = 'upstream'
all_downstream = adj_mat[adj_mat[x].sum(axis=1) > 0][x]
all_downstream['skeleton_id'] = all_downstream.index
all_downstream['relation'] = 'downstream'
# Merge tables
df = pd.concat([all_upstream, all_downstream], axis=0, ignore_index=True)
# We will use this to get name and size of neurons
logger.info('Collecting additional info for {0} neurons'.format(
len(df.skeleton_id.unique())))
review = get_review(df.skeleton_id.unique(),
remote_instance=remote_instance)
num_nodes = review.set_index('skeleton_id').total_node_count.to_dict()
names = get_names(df.skeleton_id.unique(), remote_instance=remote_instance)
df['neuron_name'] = df.skeleton_id.map(names)
df['num_nodes'] = df.skeleton_id.map(num_nodes)
df['total'] = df[x].sum(axis=1)
# Filter for min size
df = df[df.num_nodes >= min_size]
# Filter for synapse threshold
if syn_threshold:
df = df[df.total >= syn_threshold]
# Reorder columns
df = df[['neuron_name', 'skeleton_id', 'num_nodes', 'relation', 'total'] + x]
df.sort_values(['relation', 'total'], inplace=True, ascending=False)
return df.reset_index(drop=True)
@cache.undo_on_error
def get_nth_partners(x, n_circles=1, min_pre=2, min_post=2,
remote_instance=None):
"""Retrieve Nth partners.
Partners that are directly (``n_circles = 1``) or via N "hops"
(``n_circles>1``) connected to a set of seed neurons.
Parameters
----------
x
Seed neurons for which to retrieve partners. Can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
n_circles : int, optional
Number of circles around your seed neurons.
min_pre/min_post : int, optional
Synapse threshold. Set to -1 to not get any pre-/post
synaptic partners.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame each row represents a partner::
neuron_name skeleton_id
0 name1 123
1 name2 456
2 ... ...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_circles_of_hell_url()
post = {'n_circles': n_circles, 'min_pre': min_pre, 'min_post': min_post}
post.update({'skeleton_ids[{}]'.format(i): s for i, s in enumerate(x)})
# Returns list of skids [0] and names dict [1]
resp = remote_instance.fetch(url, post=post)
# If no neurons returned, return empty DataFrame
if resp[1]:
# Generate DataFrame
df = pd.DataFrame.from_dict(resp[1], orient='index').reset_index()
df.columns = ['skeleton_id', 'neuron_name']
else:
df = pd.DataFrame([], columns=['skeleton_id', 'neuron_name'])
return df
@cache.undo_on_error
def get_partners(x, threshold=1, min_size=2, filt=[], min_confidence=1,
directions=['incoming', 'outgoing',
'gapjunctions', 'attachments'],
remote_instance=None):
"""Retrieve partners connected by synapses, gap junctions or attachments.
Note
----
This function treats multiple fragments with the same skeleton ID
(e.g. from splits into axon & dendrites) as a single neuron when fetching
data from the server. For "fragmented" connectivity use
:func:`~pymaid.cn_table_from_connectors` instead.
Parameters
----------
x
Neurons for which to retrieve partners. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
threshold : int, optional
Minimum # of links (synapses/gap-junctions/etc).
min_size : int, optional
Minimum node count of partner
(default=2 to hide single-node partners).
filt : list of str, optional
Filters partners for neuron names (must be exact) or
skeleton_ids.
min_confidence : int | None, optional
If set, edges with lower confidence will be ignored.
Applied before ``threshold``.
directions : 'incoming' | 'outgoing' | 'gapjunctions' | 'attachments', optional
Use to restrict to either up- or downstream partners.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron and the number of
synapses with the query neurons::
neuron_name skeleton_id num_nodes relation total skid1 skid2 ...
0 name1 skid1 node_count1 upstream n_syn n_syn ...
1 name2 skid2 node_count2 downstream n_syn n_syn ..
2 name3 skid3 node_count3 gapjunction n_syn n_syn .
...
``relation`` can be ``'upstream'`` (incoming), ``'downstream'``
(outgoing), ``'attachment'`` or ``'gapjunction'`` (gap junction).
Warning
-------
By default, will exclude single node partners! Set ``min_size=1`` to return
ALL partners including placeholder nodes.
Notes
-----
Partners can show up multiple times if they are e.g. pre- AND postsynaptic!
Examples
--------
>>> example_skids = [16, 201, 150, 20]
>>> cn = pymaid.get_partners(example_skids)
>>> # Get only upstream partners
>>> subset = cn[ cn.relation == 'upstream' ]
>>> # Get partners with more than e.g. 5 synapses across all neurons
>>> subset2 = cn[ cn[example_skids].sum(axis=1) > 5 ]
>>> # Combine above conditions (watch parentheses!)
>>> subset3 = cn[(cn.relation=='upstream') &
... (cn[example_skids].sum(axis=1) > 5)]
See Also
--------
:func:`~pymaid.adjacency_matrix`
Use if you need an adjacency matrix instead of a table.
:func:`~pymaid.get_partners_in_volume`
Use if you only want connectivity within a given volume.
:func:`~pymaid.filter_connectivity`
Use to restrict connector table to given part of a neuron
or a volume.
:func:`~cn_table_from_connectors`
Returns "fragmented" connectivity. Use e.g. if you are
working with multiple fragments from the same neuron.
"""
if not isinstance(min_confidence, (float, int)) or min_confidence < 0 or min_confidence > 5:
raise ValueError('min_confidence must be 0-5.')
# Map CATMAID's directional terms to more intuitive relation names
relations = {'incoming': 'upstream',
'outgoing': 'downstream',
'gapjunctions': 'gapjunction',
'attachments': 'attachment'}
# Catch some easy mistakes regarding relations:
repl = {v: k for k, v in relations.items()}
directions = [repl.get(d, d) for d in directions]
wrong_dir = set(directions) - set(relations.keys())
if wrong_dir:
raise ValueError('Unknown direction "{}". Please use a combination '
'of "{}"'.format(', '.join(wrong_dir),
', '.join(relations.keys())))
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
x = np.array(x).astype(str)
remote_connectivity_url = remote_instance._get_connectivity_url()
connectivity_post = {}
connectivity_post['boolean_op'] = 'OR'
connectivity_post['with_nodes'] = False
for i, skid in enumerate(x):
tag = 'source_skeleton_ids[{0}]'.format(i)
connectivity_post[tag] = skid
logger.info('Fetching connectivity table for {} neurons'.format(len(x)))
connectivity_data = remote_instance.fetch(remote_connectivity_url,
post=connectivity_post)
# Delete directions that we don't want
connectivity_data.update(
{d: [] for d in connectivity_data if d not in directions})
# Get neurons' names
names = get_names([n for d in connectivity_data for n in connectivity_data[
d]] + list(x), remote_instance=remote_instance)
df = pd.DataFrame(columns=['neuron_name', 'skeleton_id',
'num_nodes', 'relation'] + list(x))
# Number of synapses is returned as list of links with 0-5 confidence:
# {'skid': [0, 1, 2, 3, 4, 5]}
# This is being collapsed into a single value before returning it.
for d in relations:
if d not in connectivity_data:
continue
df_temp = pd.DataFrame([[
names[str(n)],
str(n),
int(connectivity_data[d][n]['num_nodes']),
relations[d]] +
[sum(connectivity_data[d][n]['skids'].get(s,
[0, 0, 0, 0, 0])[min_confidence - 1:]) for s in x]
for i, n in enumerate(connectivity_data[d])
],
columns=['neuron_name', 'skeleton_id', 'num_nodes',
'relation'] + [str(s) for s in x],
dtype=object
)
df = pd.concat([df, df_temp], axis=0)
df['total'] = df[x].sum(axis=1).values
# Now filter for synapse threshold and size
df = df[(df.num_nodes >= min_size) & (df.total >= threshold)]
df.sort_values(['relation', 'total'], inplace=True, ascending=False)
if filt:
if not isinstance(filt, (list, np.ndarray)):
filt = [filt]
filt = [str(s) for s in filt]
df = df[df.skeleton_id.isin(filt) | df.neuron_name.isin(filt)]
df.datatype = 'connectivity_table'
# Return reindexed concatenated dataframe
df.reset_index(drop=True, inplace=True)
logger.info('Done. Found {0} pre-, {1} postsynaptic and {2} gap '
'junction-connected neurons'.format(
*[df[df.relation == r].shape[0] for r in ['upstream',
'downstream',
'gapjunction']]))
return df
@cache.undo_on_error
def get_names(x, remote_instance=None):
"""Retrieve neuron names for a list of skeleton ids.
Parameters
----------
x
Neurons for which to retrieve names. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
Either pass directly to function or define
globally as ``remote_instance``.
Returns
-------
dict
``{skid1: 'neuron_name', skid2: 'neuron_name', ...}``
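Examples
--------
Skeleton IDs below are placeholders:
>>> names = pymaid.get_names([16, 201])
>>> # Keys are stringified skeleton IDs
>>> name = names.get('16')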
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
x = list(set(x))
remote_get_names_url = remote_instance._get_neuronnames()
get_names_postdata = {}
get_names_postdata['self.project_id'] = remote_instance.project_id
for i in range(len(x)):
key = 'skids[%i]' % i
get_names_postdata[key] = x[i]
names = remote_instance.fetch(remote_get_names_url, post=get_names_postdata)
logger.debug('Names for {} of {} skeleton IDs retrieved'.format(len(names),
len(x)))
return names
@cache.undo_on_error
def get_node_details(x, chunk_size=10000, convert_ts=True, remote_instance=None):
"""Retrieve detailed info for nodes and/or connectors.
Parameters
----------
x : list | CatmaidNeuron | CatmaidNeuronList
List of node ids: can be node or connector IDs!
If CatmaidNeuron/List, will get both nodes and
connectors!
chunk_size : int, optional
Querying a large number of nodes at once can result in
server errors; they are therefore queried in chunks of
this size.
convert_ts : bool, optional
If True, will convert timestamps from strings to
datetime objects.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
node_id creation_time creator edition_time ...
0
1
editor reviewers review_times
0
1
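Examples
--------
Node/connector IDs below are placeholders:
>>> details = pymaid.get_node_details([123456, 123457])
>>> # Keep timestamps as strings for speed
>>> details = pymaid.get_node_details([123456, 123457], convert_ts=False)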
"""
if isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
node_ids = np.append(x.nodes.node_id.values,
x.connectors.connector_id.values)
elif not isinstance(x, (list, tuple, np.ndarray)):
node_ids = [x]
else:
node_ids = x
remote_instance = utils._eval_remote_instance(remote_instance)
logger.debug('Retrieving details for {} nodes...'.format(len(node_ids)))
urls = []
post = []
for ix in range(0, len(node_ids), chunk_size):
urls.append(remote_instance._get_node_info_url())
post.append({'node_ids[{}]'.format(k): tn for k, tn in enumerate(node_ids[ix:ix + chunk_size])})
# Get responses
resp = remote_instance.fetch(urls, post=post, desc='Chunks')
# Merge into a single dictionary
data = {k: d[k] for d in resp for k in d}
# Generate dataframe
data_columns = ['creation_time', 'user', 'edition_time',
'editor', 'reviewers', 'review_times']
df = pd.DataFrame(
[[e] + [d[k] for k in data_columns] for e, d in data.items()],
columns=['node_id'] + data_columns,
dtype=object
)
# Rename column 'user' to 'creator'
df.rename({'user': 'creator'}, axis='columns', inplace=True)
if convert_ts:
df['creation_time'] = pd.to_datetime(df.creation_time)
df['edition_time'] = pd.to_datetime(df.edition_time)
df['review_times'] = df.review_times.apply(lambda x: [pd.to_datetime(d)
for d in x])
return df
@cache.undo_on_error
def get_skid_from_node(node_ids, remote_instance=None):
"""Retrieve skeleton IDs from a list of nodes.
Parameters
----------
node_ids : int | list of int
Node ID(s) to retrieve skeleton IDs for.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{node_id: skeleton_ID, ...}``. If a node does not exist,
``skeleton_ID`` will be ``None``.
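Examples
--------
Node IDs below are placeholders:
>>> skids = pymaid.get_skid_from_node([123456, 123457])
>>> # -> {123456: skeleton_ID_or_None, 123457: ...}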
"""
remote_instance = utils._eval_remote_instance(remote_instance)
node_ids = utils.eval_node_ids(node_ids, connectors=False, nodes=True)
if not isinstance(node_ids, (list, np.ndarray)):
node_ids = [node_ids]
urls = [remote_instance._get_skid_from_tnid(tn) for tn in node_ids]
data = remote_instance.fetch(urls, desc='Fetch skids')
return {node_ids[i]: d.get('skeleton_id', None) for i, d in enumerate(data)}
@cache.undo_on_error
def get_node_table(x, include_details=True, convert_ts=True, remote_instance=None):
"""Retrieve node table(s) for a list of neurons.
Parameters
----------
x
Catmaid Neuron(s) as single or list of either:
1. skeleton IDs (int or str)
2. neuron name (str, exact match)
3. annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
include_details : bool, optional
If True, tags and reviewer are included in the table.
For larger lists, it is recommended to set this to
False to improve performance.
convert_ts : bool, optional
If True, will convert edition timestamp to pandas
datetime. Set to False to improve performance.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
skeleton_id node_id parent_node_id confidence x y z ...
0
1
2
...
radius creator last_edited reviewers tags
0
1
2
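Examples
--------
The skeleton ID below is a placeholder; for long neuron lists,
dropping details and timestamp conversion speeds things up:
>>> tbl = pymaid.get_node_table(16, include_details=False,
...                             convert_ts=False)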
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
logger.info('Retrieving {} node table(s)...'.format(len(x)))
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
# Generate URLs to retrieve
urls = []
for skid in x:
remote_nodes_list_url = remote_instance._get_skeleton_nodes_url(skid)
urls.append(remote_nodes_list_url)
node_list = remote_instance.fetch(urls, desc='Get tables')
logger.info('{} nodes retrieved. Creating table..'
'.'.format(sum([len(nl[0]) for nl in node_list])))
all_tables = []
for i, nl in enumerate(config.tqdm(node_list,
desc='Creating table',
leave=config.pbar_leave,
disable=config.pbar_hide)):
this_df = pd.DataFrame(nl[0],
columns=['node_id', 'parent_node_id',
'confidence', 'x', 'y', 'z', 'radius',
'creator', 'last_edited']
)
# Parent IDs can be `None` here - we will set them to -1
this_df.loc[this_df.parent_node_id.isnull(), 'parent_node_id'] = -1
this_df['parent_node_id'] = this_df.parent_node_id.astype(int)
# Keep track of skeleton ID
this_df['skeleton_id'] = x[i]
if include_details:
tag_dict = {}
for t in nl[2]:
tag_dict[t[0]] = tag_dict.get(t[0], []) + [t[1]]
reviewer_dict = {}
for r in nl[1]:
reviewer_dict[r[0]] = reviewer_dict.get(r[0], []) + [user_dict.get(r[1])]
this_df['reviewers'] = this_df.node_id.map(reviewer_dict)
this_df['tags'] = this_df.node_id.map(tag_dict)
all_tables.append(this_df)
# Concatenate all DataFrames
tn_table = pd.concat(all_tables, axis=0, ignore_index=True)
# Replace creator_id with their login and make it a categorical
tn_table['creator'] = tn_table.creator.map(user_dict).astype('category')
# Replace timestamp with datetime object
if convert_ts:
tn_table['last_edited'] = pd.to_datetime(tn_table.last_edited,
utc=True,
unit='s')
return tn_table
@cache.undo_on_error
def get_edges(x, remote_instance=None):
"""Retrieve edges between sets of neurons.
Synaptic connections only!
Parameters
----------
x
Neurons for which to retrieve edges. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents an edge::
source target weight
1
2
3
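Examples
--------
Skeleton IDs below are placeholders:
>>> edges = pymaid.get_edges([16, 201, 150])
>>> # Strongest edges first
>>> edges = edges.sort_values('weight', ascending=False)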
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_edges_url = remote_instance._get_edges_url()
get_edges_postdata = {}
get_edges_postdata['confidence_threshold'] = '0'
for i in range(len(x)):
key = 'skeleton_ids[%i]' % i
get_edges_postdata[key] = x[i]
edges = remote_instance.fetch(remote_get_edges_url, post=get_edges_postdata)
df = pd.DataFrame([[e[0], e[1], sum(e[2])] for e in edges['edges']],
columns=['source', 'target', 'weight']
)
return df
@cache.undo_on_error
def get_connectors(x, relation_type=None, tags=None, remote_instance=None):
"""Retrieve connectors based on a set of filters.
Parameters
----------
x
Neurons for which to retrieve connectors. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
5. ``None`` if you want to fetch all connectors that
match the other criteria
relation_type : 'presynaptic_to' | 'postsynaptic_to' | 'gapjunction_with' | 'abutting' | 'attached_to', optional
If provided, will filter for these connection types.
tags : str | list of str, optional
If provided, will filter connectors for tag(s).
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector::
connector_id x y z confidence creator ...
0
1
...
editor creation_time edition_time
0
1
...
Examples
--------
Get all connectors for a single neuron:
>>> cn = pymaid.get_connectors(16)
Get every connector with a given tag:
>>> tagged_cn = pymaid.get_connectors(None, tags='FML_sample')
Get all tagged connectors for a set of neurons:
>>> tagged_cn2 = pymaid.get_connectors('annotation:glomerulus DA1',
...                                    tags='FML_sample')
See Also
--------
:func:`~pymaid.get_connector_details`
If you need details about the connectivity of a connector
:func:`~pymaid.get_connectors_between`
If you need to find the connectors between sets of neurons.
:func:`~pymaid.get_connector_links`
If you need details about links for each connector.
:func:`pymaid.find_nodes`
Function to get nodes by tags, IDs or skeleton.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(x, type(None)):
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_connectors_url = remote_instance._get_connectors_url()
postdata = {'with_tags': 'true', 'with_partners': 'true'}
# Add skeleton IDs filter (if applicable)
if not isinstance(x, type(None)):
postdata.update(
{'skeleton_ids[{0}]'.format(i): s for i, s in enumerate(x)})
# Add tags filter (if applicable)
if not isinstance(tags, type(None)):
if not isinstance(tags, (list, np.ndarray)):
tags = [tags]
postdata.update({'tags[{0}]'.format(i): str(t)
for i, t in enumerate(tags)})
# Add relation_type filter (if applicable)
allowed_relations = ['presynaptic_to', 'postsynaptic_to',
'gapjunction_with', 'abutting', 'attached_to']
if not isinstance(relation_type, type(None)):
if relation_type not in allowed_relations:
raise ValueError('Unknown relation type "{0}". Must be in '
'{1}'.format(relation_type, allowed_relations))
postdata.update({'relation_type': relation_type})
data = remote_instance.fetch(remote_get_connectors_url, post=postdata)
# creator_id and editor_id will be replaced with logins later
df = pd.DataFrame(data=data['connectors'],
columns=['connector_id', 'x', 'y', 'z', 'confidence',
'creator_id', 'editor_id', 'creation_time',
'edition_time'])
# Add tags
df['tags'] = df.connector_id.astype(str).map(data['tags'])
# Map hardwire connector type ID to their type name
# ATTENTION: "attachment" can be part of any connector type
rel_ids = {r['relation_id']: r for r in config.link_types}
# Get connector type IDs
cn_ids = {k: v[0][3] for k, v in data['partners'].items()}
# Map type ID to relation (also note conversion of connector ID to integer)
cn_type = {int(k): rel_ids.get(v, {'type': 'unknown'})['type']
for k, v in cn_ids.items()}
# Map connector ID to connector type
df['type'] = df.connector_id.map(cn_type)
# Add creator login instead of id
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
df['creator'] = df.creator_id.map(user_dict)
df['editor'] = df.editor_id.map(user_dict)
df.drop(['creator_id', 'editor_id'], inplace=True, axis=1)
# Convert timestamps to datetimes
df['creation_time'] = df['creation_time'].apply(
datetime.datetime.fromtimestamp)
df['edition_time'] = df['edition_time'].apply(
datetime.datetime.fromtimestamp)
df.datatype = 'connector_table'
return df
@cache.undo_on_error
def get_connector_links(x, with_tags=False, chunk_size=50,
remote_instance=None):
"""Retrieve connectors links for a set of neurons.
In essence, this will get you all "arrows" that point from a connector to
your neuron or from your neuron to a connector. It does NOT give you the
full set of links for each connector. For that you have
to use :func:`~pymaid.get_connector_details`.
Parameters
----------
x : int | CatmaidNeuron | CatmaidNeuronList
Neurons/Skeleton IDs to retrieve link details for. If
CatmaidNeuron/List will respect changes made to
original neurons (e.g. pruning)!
with_tags : bool, optional
If True will also return dictionary of connector tags.
chunk_size : int, optional
Neurons are split into chunks of this size and then
queried sequentially to prevent server from returning
an error.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector link::
skeleton_id relation connector_id x y z confidence ...
0
1
2
...
creator node_id creation_time edition_time
0
1
2
(links, tags)
If ``with_tags=True``, will return above DataFrame and tags dict.
See Also
--------
:func:`~pymaid.get_connectors`
If you just need the connector table (ID, x, y, z, creator, etc).
:func:`~pymaid.get_connector_details`
Get the same data but by connector, not by link.
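Examples
--------
Skeleton IDs below are placeholders:
>>> links = pymaid.get_connector_links([16, 201])
>>> # Restrict to presynaptic links
>>> pre = links[links.relation == 'presynaptic_to']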
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, warn_duplicates=False,
remote_instance=remote_instance)
df_collection = []
tags = {}
link_types = [l['relation'] for l in config.link_types]
with config.tqdm(desc='Fetching links', total=len(skids),
disable=config.pbar_hide,
leave=config.pbar_leave) as pbar:
for chunk in [skids[i:i + chunk_size] for i in range(0, len(skids), chunk_size)]:
# Generate URLs
GET = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(chunk)}
urls = [remote_instance._get_connector_links_url(relation_type=cn,
**GET) for cn in link_types]
# Fetch data
responses = remote_instance.fetch(urls, disable_pbar=True)
# Extract tags
if with_tags:
for r in responses:
tags.update(r['tags'])
# Generate separate DataFrames
data = [pd.DataFrame(r['links'],
columns=['skeleton_id', 'connector_id',
'x', 'y', 'z', 'confidence',
'creator', 'node_id',
'creation_time', 'edition_time']
) for r in responses]
# Add link type to each DataFrame
for t, d in zip(link_types, data):
d['relation'] = t
# Concatenate DataFrames
df = pd.concat(data, axis=0)
# Store
df_collection.append(df)
# Update progress bar
pbar.update(len(chunk))
# Merge DataFrames
df = pd.concat(df_collection, axis=0)
# Cater for cases in which the original neurons have been edited
if isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
df = df[df.connector_id.isin(x.connectors.connector_id)]
# Convert to timestamps
df['creation_time'] = pd.to_datetime(df.creation_time)
df['edition_time'] = pd.to_datetime(df.edition_time)
if with_tags:
return df, tags
return df
@cache.undo_on_error
def get_connector_details(x, remote_instance=None):
"""Retrieve details on sets of connectors.
Parameters
----------
x : list of connector IDs | CatmaidNeuron | CatmaidNeuronList
Connector ID(s) to retrieve details for. If
CatmaidNeuron/List, will use their connectors.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector::
connector_id presynaptic_to postsynaptic_to ...
0
1
2
...
presynaptic_to_node postsynaptic_to_node
0
1
2
See Also
--------
:func:`~pymaid.get_connectors`
If you just need the connector table (ID, x, y, z, creator, etc).
:func:`~pymaid.get_connector_links`
Get the same data but by link, not by connector.
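Examples
--------
Connector IDs below are placeholders:
>>> details = pymaid.get_connector_details([123456, 123457])
>>> # Count postsynaptic partners per connector
>>> n_post = details.postsynaptic_to_node.apply(len)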
"""
remote_instance = utils._eval_remote_instance(remote_instance)
connector_ids = utils.eval_node_ids(x, connectors=True, nodes=False)
connector_ids = list(set(connector_ids))
remote_get_connectors_url = remote_instance._get_connector_details_url()
# Depending on DATA_UPLOAD_MAX_NUMBER_FIELDS of your CATMAID server
# (default = 1000), we have to cut requests into batches smaller than that
DATA_UPLOAD_MAX_NUMBER_FIELDS = min(50000, len(connector_ids))
connectors = []
with config.tqdm(total=len(connector_ids), desc='CN details',
disable=config.pbar_hide,
leave=config.pbar_leave) as pbar:
for b in range(0, len(connector_ids), DATA_UPLOAD_MAX_NUMBER_FIELDS):
get_connectors_postdata = {}
for i, s in enumerate(connector_ids[b:b + DATA_UPLOAD_MAX_NUMBER_FIELDS]):
key = 'connector_ids[%i]' % i
get_connectors_postdata[key] = s # connector_ids[i]
connectors += remote_instance.fetch(remote_get_connectors_url,
post=get_connectors_postdata)
pbar.update(DATA_UPLOAD_MAX_NUMBER_FIELDS)
logger.info('Data for %i of %i unique connector IDs retrieved' % (
len(connectors), len(set(connector_ids))))
columns = ['connector_id', 'presynaptic_to', 'postsynaptic_to',
'presynaptic_to_node', 'postsynaptic_to_node']
df = pd.DataFrame([[cn[0]] + [cn[1][e] for e in columns[1:]] for cn in connectors],
columns=columns,
dtype=object
)
return df
@cache.undo_on_error
def get_connector_tags(x, remote_instance=None):
"""Retrieve tags on sets of connectors.
Parameters
----------
x : list of connector IDs | CatmaidNeuron | CatmaidNeuronList
Connector ID(s) to retrieve details for. If
CatmaidNeuron/List, will use their connectors.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
Dictionary mapping tags (``str``) to connector IDs
(``int``)::
{
tag1: [connector1_id, connector2_id, ...],
tag2: [ ... ], ...
}
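Examples
--------
Connector IDs and the tag below are placeholders:
>>> tags = pymaid.get_connector_tags([123456, 123457])
>>> # Connectors carrying a given tag
>>> tagged = tags.get('FML_sample', [])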
"""
remote_instance = utils._eval_remote_instance(remote_instance)
connector_ids = utils.eval_node_ids(x, connectors=True, nodes=False)
connector_ids = list(set(connector_ids))
remote_get_node_labels_url = remote_instance._get_node_labels_url()
post = {'connector_ids': ','.join([str(tn) for tn in connector_ids])}
resp = remote_instance.fetch(remote_get_node_labels_url, post=post)
cn_tags = {}
for cnid in resp:
cn_tags.update({tag: cn_tags.get(tag, []) + [int(cnid)] for tag in resp[cnid]})
return cn_tags
@cache.undo_on_error
def get_connectors_between(a, b, directional=True, remote_instance=None):
"""Retrieve connectors between sets of neurons.
Important
---------
This function does currently *not* return gap junctions between neurons.
Notes
-----
A connector can show up multiple times if it connects to more than
one node of the same neuron.
Parameters
----------
a,b
Neurons for which to retrieve connectors. Can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
directional : bool, optional
If True, only connectors a -> b are listed,
otherwise it is a <-> b.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector::
connector_id connector_loc node1_id source_neuron ...
0
1
2
...
confidence1 creator1 node1_loc node2_id target_neuron ...
0
1
2
...
confidence2 creator2 node2_loc
0
1
2
See Also
--------
:func:`~pymaid.get_edges`
If you just need the number of synapses between neurons, this is much
faster.
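Examples
--------
Skeleton IDs below are placeholders:
>>> # Connectors made from neuron 16 onto neuron 201
>>> cn = pymaid.get_connectors_between(16, 201)
>>> # Connectors in either direction
>>> cn = pymaid.get_connectors_between(16, 201, directional=False)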
"""
remote_instance = utils._eval_remote_instance(remote_instance)
a = utils.eval_skids(a, remote_instance=remote_instance)
b = utils.eval_skids(b, remote_instance=remote_instance)
if len(a) == 0:
raise ValueError('No source neurons provided')
if len(b) == 0:
raise ValueError('No target neurons provided')
post = {'relation': 'presynaptic_to'}
post.update({'skids1[{0}]'.format(i): s for i, s in enumerate(a)})
post.update({'skids2[{0}]'.format(i): s for i, s in enumerate(b)})
url = remote_instance._get_connectors_between_url()
data = remote_instance.fetch(url, post=post)
if not directional:
post['relation'] = 'postsynaptic_to'
data += remote_instance.fetch(url, post=post)
df = pd.DataFrame(data,
columns=['connector_id', 'connector_loc', 'node1_id',
'source_neuron', 'confidence1', 'creator1',
'node1_loc', 'node2_id',
'target_neuron', 'confidence2', 'creator2',
'node2_loc'])
# Get user list and replace IDs with logins
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
df['creator1'] = df['creator1'].map(user_dict)
df['creator2'] = df['creator2'].map(user_dict)
return df
@cache.undo_on_error
def get_review(x, remote_instance=None):
"""Retrieve review status for a set of neurons.
Parameters
----------
x
Neurons for which to get review status. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron::
skeleton_id neuron_name total_node_count nodes_reviewed ...
0
1
...
percent_reviewed
0
1
...
See Also
--------
:func:`~pymaid.get_review_details`
Gives you review status for individual nodes of a given neuron.
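Examples
--------
The annotation below is a placeholder:
>>> rev = pymaid.get_review('annotation:glomerulus DA1')
>>> # Neurons that are fully reviewed
>>> done = rev[rev.percent_reviewed == 100]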
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_reviews_url = remote_instance._get_review_status_url()
names = {}
review_status = {}
CHUNK_SIZE = 1000
with config.tqdm(total=len(x), disable=config.pbar_hide,
desc='Rev. status',
leave=config.pbar_leave) as pbar:
for j in range(0, len(x), CHUNK_SIZE):
get_review_postdata = {}
for i in range(j, min(j + CHUNK_SIZE, len(x))):
key = 'skeleton_ids[%i]' % i
get_review_postdata[key] = str(x[i])
names.update(get_names(x[j:j + CHUNK_SIZE],
remote_instance=remote_instance))
review_status.update(remote_instance.fetch(remote_get_reviews_url,
post=get_review_postdata))
pbar.update(CHUNK_SIZE)
df = pd.DataFrame([[s,
names[str(s)],
review_status[s][0],
review_status[s][1],
int(review_status[s][1] / review_status[s][0] * 100)
] for s in review_status],
columns=['skeleton_id', 'neuron_name',
'total_node_count', 'nodes_reviewed',
'percent_reviewed']
)
return df
@cache.undo_on_error
def get_user_annotations(x, remote_instance=None):
"""Retrieve annotations used by given user(s).
Parameters
----------
x
User(s) to get annotation for. Can be either:
1. single or list of user IDs
2. single or list of user login names
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame (df) in which each row represents a single annotation::
annotation annotated_on times_used user_id annotation_id user_login
0
1
...
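Examples
--------
User logins below are placeholders:
>>> an = pymaid.get_user_annotations(['schlegelp', 'lif'])
>>> # Results are sorted by times_used, so most-used annotations last
>>> an.tail()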
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(x, (list, np.ndarray)):
x = [x]
# Get user list
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
try:
ids = [int(e) for e in x]
except BaseException:
ids = user_list.set_index('login').loc[x, 'id'].values
# This works with neuron_id NOT skeleton_id
# neuron_id can be requested via neuron_names
url_list = list()
postdata = list()
iDisplayLength = 500
for u in ids:
url_list.append(remote_instance._get_annotation_table_url())
postdata.append(dict(user_id=int(u),
iDisplayLength=iDisplayLength))
# Get data
annotations = [e['aaData'] for e in remote_instance.fetch(
url_list, post=postdata, desc='Get annot')]
# Add user login
for i, u in enumerate(ids):
for an in annotations[i]:
an.append(user_dict.get(u, 'Anonymous'))
# Now flatten the list of lists
annotations = [an for sublist in annotations for an in sublist]
# Create dataframe
df = pd.DataFrame(annotations,
columns=['annotation', 'annotated_on', 'times_used',
'user_id', 'annotation_id', 'user_login'],
dtype=object
)
df['annotated_on'] = [datetime.datetime.strptime(
d[:16], '%Y-%m-%dT%H:%M') for d in df['annotated_on'].values]
return df.sort_values('times_used').reset_index(drop=True)
@cache.undo_on_error
def get_annotation_details(x, remote_instance=None):
"""Retrieve annotations for a set of neuron.
Returns more details than :func:`~pymaid.get_annotations` but is slower.
Contains timestamps and user IDs (same API as neuron navigator).
Parameters
----------
x
Neurons to get annotation details for. Can be either:
1. List of skeleton ID(s) (int or str)
2. List of neuron name(s) (str, exact match)
3. An annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a single annotation::
annotation skeleton_id time_annotated user_id annotation_id user
0
1
...
See Also
--------
:func:`~pymaid.get_annotations`
Gives you annotations for a list of neurons (faster).
Examples
--------
>>> # Get annotations for a set of neurons
>>> an = pymaid.get_annotation_details([ 12, 57003 ])
>>> # Get those for a single neuron
>>> an[ an.skeleton_id == '57003' ]
>>> # Get annotations given by set of users
>>> an[ an.user.isin( ['schlegelp', 'lif'] )]
>>> # Get most recent annotations
>>> import datetime
>>> an[ an.time_annotated > datetime.date(2017, 6, 1) ]
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
# This works with neuron_id NOT skeleton_id
# neuron_id can be requested via neuron_names
url_list = list()
postdata = list()
neuron_ids = get_neuron_id(skids, remote_instance=remote_instance)
for s in skids:
nid = neuron_ids.get(str(s))
url_list.append(remote_instance._get_annotation_table_url())
postdata.append(dict(neuron_id=int(nid)))
# Get data
annotations = [e['aaData'] for e in remote_instance.fetch(url_list,
post=postdata,
desc='Get annot')]
# Get user list
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
# Add skeleton ID and user login
for i, s in enumerate(skids):
for an in annotations[i]:
an.insert(1, s)
an.append(user_dict.get(an[4]))
# Now flatten the list of lists
annotations = [an for sublist in annotations for an in sublist]
# Create dataframe
df = pd.DataFrame(annotations,
columns=['annotation', 'skeleton_id', 'time_annotated',
'times_used', 'user_id', 'annotation_id',
'user'],
dtype=object
)
# Times used appears to not be working (always shows "1") - remove it
df.drop('times_used', inplace=True, axis=1)
df['time_annotated'] = [datetime.datetime.strptime(
d[:16], '%Y-%m-%dT%H:%M') for d in df['time_annotated'].values]
return df.sort_values('annotation').reset_index(drop=True)
@cache.undo_on_error
def get_annotations(x, remote_instance=None):
"""Retrieve annotations for a list of skeleton ids.
If a neuron has no annotations, it will not show up in returned dict!
Notes
-----
This API endpoint does not process more than 250 neurons at a time!
Parameters
----------
x
Neurons for which to retrieve annotations. Can be
either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{skeleton_id: [annnotation, annotation], ...}``
See Also
--------
:func:`~pymaid.get_annotation_details`
Gives you more detailed information about annotations
of a set of neurons (includes timestamp and user) but
is slower.
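Examples
--------
Skeleton IDs below are placeholders:
>>> an = pymaid.get_annotations([16, 201])
>>> # Keys are stringified skeleton IDs
>>> an_16 = an.get('16', [])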
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_annotations_url = remote_instance._get_annotations_for_skid_list()
get_annotations_postdata = {'metaannotations': 0, 'neuronnames': 0}
for i in range(len(x)):
key = 'skeleton_ids[%i]' % i
get_annotations_postdata[key] = str(x[i])
annotation_list_temp = remote_instance.fetch(remote_get_annotations_url,
post=get_annotations_postdata)
annotation_list = {}
try:
for skid in annotation_list_temp['skeletons']:
annotation_list[skid] = []
# for entry in annotation_list_temp['skeletons'][skid]:
for entry in annotation_list_temp['skeletons'][skid]['annotations']:
annotation_id = entry['id']
annotation_list[skid].append(
annotation_list_temp['annotations'][str(annotation_id)])
return annotation_list
except BaseException:
raise Exception(
'No annotations retrieved. Make sure that the skeleton IDs exist.')
@cache.wipe_and_retry
def get_annotation_id(annotations, allow_partial=False, raise_not_found=True,
remote_instance=None):
"""Retrieve the annotation ID for single or list of annotation(s).
Parameters
----------
annotations : str | list of str
Single annotations or list of multiple annotations.
allow_partial : bool, optional
If True, will allow partial matches.
raise_not_found : bool, optional
If True raise Exception if no match for any of the
query annotations is found. Else log warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{'annotation_name': 'annotation_id', ...}``
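Examples
--------
Annotation names below are placeholders:
>>> ids = pymaid.get_annotation_id('glomerulus DA1')
>>> # Partial matching across several queries
>>> ids = pymaid.get_annotation_id(['DA1', 'DA2'], allow_partial=True)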
"""
remote_instance = utils._eval_remote_instance(remote_instance)
logger.debug('Retrieving list of annotations...')
remote_annotation_list_url = remote_instance._get_annotation_list()
an_list = remote_instance.fetch(remote_annotation_list_url)
# Turn into pandas array
an_list = pd.DataFrame.from_records(an_list['annotations'])
annotations = utils._make_iterable(annotations)
annotation_ids = {}
for an in annotations:
# This is just to catch misunderstandings with parsing skeleton IDs
if an.startswith('annotation:'):
logger.warning('Removing unexpected "annotation:" prefix.')
an = an[11:]
# Strip whitespaces
an = an.strip()
# Strip tilde -> consider that people might use e.g. "~/VA6" for NOT
# VA6
if an.startswith('~'):
an = an[1:]
# '/' indicates regex
if an.startswith('/'):
re_str = an[1:]
# If allow partial just use the raw string
elif allow_partial:
re_str = an
# If exact match, encode this in regex
else:
re_str = '^{}$'.format(an)
# Search for matches
res = an_list[an_list.name.str.match(re_str)].set_index('name').id.to_dict()
if not res:
logger.warning('No annotation found for "{}"'.format(an))
annotation_ids.update(res)
if not annotation_ids:
if raise_not_found:
raise Exception('No matching annotation(s) found')
else:
logger.warning('No matching annotation(s) found')
return annotation_ids
@cache.undo_on_error
def find_nodes(tags=None, node_ids=None, skeleton_ids=None,
remote_instance=None):
"""Get nodes by tag (label), ID or associated skeleton.
Searches are intersected (logical AND) across parameters but additive (logical OR)
within each parameter (see examples).
Parameters
----------
tags : str | list of str
Use to restrict to nodes with given tags.
node_ids : int | list of int
Use to restrict to nodes with given IDs.
skeleton_ids : str | int | CatmaidNeuron/List, optional
Use to restrict to a set of neurons. Can be:
1. skeleton ID(s) (int or str)
2. neuron name(s) (str)
3. annotation(s): e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
skeleton_id node_id parent_id x y z confidence ...
0
1
2
...
radius edition_time creator_id
0
1
2
See Also
--------
:func:`pymaid.get_connectors`
Function to get connectors by neurons and/or by tags.
Examples
--------
Get all nodes with a given tag
>>> tagged = pymaid.find_nodes(tags='SCHLEGEL_LH')
Get all nodes of a set of neurons with either of two tags
>>> tagged = pymaid.find_nodes(tags=['SCHLEGEL_LH', 'SCHLEGEL_AL'],
...                        skeleton_ids='annotation:glomerulus DA1')
"""
remote_instance = utils._eval_remote_instance(remote_instance)
url = remote_instance._get_node_table_url()
if all([isinstance(x, type(None)) for x in [tags, skeleton_ids, node_ids]]):
answer = ""
while answer not in ["y", "n"]:
answer = input("Your search parameters will retrieve ALL "
"nodes in the dataset. Proceed? "
"[Y/N] ").lower()
if answer != 'y':
logger.info('Query cancelled')
return
post = {}
if not isinstance(tags, type(None)):
tags = utils._make_iterable(tags)
post.update({'label_names[{}]'.format(i): t for i, t in enumerate(tags)})
if not isinstance(node_ids, type(None)):
node_ids = utils._make_iterable(node_ids)
post.update({'treenode_ids[{}]'.format(i): t for i, t in enumerate(node_ids)})
if not isinstance(skeleton_ids, type(None)):
skeleton_ids = utils.eval_skids(skeleton_ids, remote_instance=remote_instance)
post.update({'skeleton_ids[{}]'.format(i): s for i, s in enumerate(skeleton_ids)})
# Fetch
resp = remote_instance.fetch(url, post=post)
# Format is [[ID, parent ID, x, y, z, confidence, radius, skeleton_id,
# edition_time, user_id], ...]
df = pd.DataFrame(resp,
columns=['node_id', 'parent_id', 'x', 'y', 'z', 'confidence',
'radius', 'skeleton_id', 'edition_time',
'creator_id'])
# Reorder and return
return df[['skeleton_id', 'node_id', 'parent_id', 'x', 'y', 'z',
'confidence', 'radius', 'edition_time', 'creator_id']]
@cache.undo_on_error
def has_soma(x, tag='soma', min_rad=500, return_ids=False,
remote_instance=None):
"""Check if neuron(s) has soma.
Parameters
----------
x
Neurons which to check for a soma. Can be either:
1. skeleton ID(s) (int or str)
2. neuron name(s) (str)
3. annotation(s): e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
tag : str | None, optional
Tag we expect the soma to have. Set to ``None`` if
not applicable.
min_rad : int, optional
Minimum radius of soma.
return_ids : bool, optional
If True, will return node IDs of soma(s) found
instead of simply if a soma has been found.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
If ``return_ids=False``::
{skid1: True, skid2: False, ...}
If ``return_ids=True``::
{skid1: [node_id], skid2: [node_id], ...}
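Examples
--------
Skeleton IDs below are placeholders:
>>> soma = pymaid.has_soma([16, 201])
>>> # Get node IDs of the detected somas instead
>>> soma_nodes = pymaid.has_soma([16, 201], return_ids=True)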
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_node_table_url()
post = {'label_names[0]': tag}
post.update({'skeleton_ids[{}]'.format(i): s for i, s in enumerate(x)})
# Fetch only nodes that have the soma label
resp = remote_instance.fetch(url, post=post)
# Format is [[ID, parent ID, x, y, z, confidence, radius, skeleton_id,
# edition_time, user_id], ...]
if return_ids is False:
by_skid = {int(s): False for s in x}
for e in resp:
by_skid[e[7]] = max(by_skid[e[7]], e[6] >= min_rad)
else:
by_skid = {int(s): [] for s in x}
for e in resp:
if e[6] >= min_rad:
by_skid[e[7]].append(e[0])
return by_skid
@cache.undo_on_error
def get_annotated(x, include_sub_annotations=False, raise_not_found=True,
allow_partial=False, remote_instance=None):
"""Retrieve entities (neurons + annotations) with given annotation(s).
This works similarly to CATMAID's neuron search widget: multiple annotations
are intersected! Includes meta-annotations.
Parameters
----------
x : str | list of str
(Meta-)annotations(s) to search for. Like
CATMAID's search widget, you can use regex to
search for names by starting the query with a
leading ``/``. Use a leading ``~`` (tilde) to
indicate ``NOT`` condition.
include_sub_annotations : bool, optional
If True, will include entities that have
annotations meta-annotated with ``x``. Does not
work on `NOT` search conditions.
allow_partial : bool, optional
If True, partially matching annotations are
searched too.
raise_not_found : bool, optional
If True raise Exception if no match for any of the
query annotations is found. Else log warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents an entity::
id name skeleton_ids type
0
1
2
...
See Also
--------
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
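Examples
--------
Annotations below are placeholders:
>>> # Entities annotated with BOTH annotations
>>> ent = pymaid.get_annotated(['glomerulus DA1', 'uPN'])
>>> # Keep only the neurons
>>> neurons = ent[ent.type == 'neuron']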
"""
remote_instance = utils._eval_remote_instance(remote_instance)
pos, neg = utils._eval_conditions(x)
post = {'with_annotations': False}
# Initialize so the log messages below work even if one side is empty
pos_ids, neg_ids = {}, {}
if pos:
pos_ids = get_annotation_id(pos, allow_partial=allow_partial,
raise_not_found=raise_not_found,
remote_instance=remote_instance)
post.update({'annotated_with[{}]'.format(i): n for i, n in enumerate(pos_ids.values())})
if include_sub_annotations:
post.update({'sub_annotated_with[{}]'.format(i): n for i, n in enumerate(pos_ids.values())})
if neg:
neg_ids = get_annotation_id(neg, allow_partial=allow_partial,
raise_not_found=raise_not_found,
remote_instance=remote_instance)
post.update({'not_annotated_with[{}]'.format(i): n for i, n in enumerate(neg_ids.values())})
logger.info('Searching for: {}'.format(','.join([str(s) for s in pos_ids])))
if neg:
logger.info('..... and NOT: {}'.format(','.join([str(s) for s in neg_ids])))
urls = remote_instance._get_annotated_url()
resp = remote_instance.fetch(urls, post=post, desc='Fetching')
return pd.DataFrame(resp['entities'])
@cache.undo_on_error
def get_skids_by_name(names, allow_partial=True, raise_not_found=True,
remote_instance=None):
"""Retrieve the all neurons with matching name.
Parameters
----------
names : str | list of str
Name(s) to search for. Like CATMAID's search widget,
you can use regex to search for names by starting
the query with a leading ``/``.
allow_partial : bool, optional
If True, partial matches are returned too.
raise_not_found : bool, optional
If True, will raise an exception if no matches for
the given name(s) are found. Else will return empty
DataFrame.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron::
name skeleton_id
0
1
2
...
See Also
--------
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
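Examples
--------
Names below are placeholders; a leading ``/`` switches to regex:
>>> df = pymaid.get_skids_by_name('PN right', allow_partial=True)
>>> df = pymaid.get_skids_by_name('/^PN.*right$')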
"""
remote_instance = utils._eval_remote_instance(remote_instance)
# Only look for unique names
names = list(set(utils._make_iterable(names, force_type=str)))
# Prepare names for regex search on the backend
post = []
for n in names:
post.append({'name': n,
'with_annotations': False,
'name_exact': True})
# If we allow partial matches or are using regex, set exact_name to False
if allow_partial or n.startswith('/'):
post[-1]['name_exact'] = False
urls = [remote_instance._get_annotated_url() for n in post]
responses = remote_instance.fetch(urls, post=post, desc='Fetching names')
neurons = [n for res in responses for n in res['entities'] if n['type'] == 'neuron']
df = pd.DataFrame([[n['name'], n['skeleton_ids'][0]] for n in neurons],
columns=['name', 'skeleton_id'])
if df.empty and raise_not_found:
raise Exception('No matching name(s) found')
return df.sort_values(['name']).drop_duplicates().reset_index(drop=True)
@cache.undo_on_error
def get_skids_by_annotation(annotations, allow_partial=False, intersect=False,
raise_not_found=True, remote_instance=None):
"""Retrieve the neurons annotated with given annotation(s).
Parameters
----------
annotations : str | list
Single annotation or list of multiple annotations.
Using a tilde (~) as prefix is interpreted as NOT.
allow_partial : bool, optional
If True, allow partial match of annotation.
intersect : bool, optional
If True, neurons must have ALL provided
annotations.
raise_not_found : bool, optional
If True raise Exception if no match for any of the
query annotations is found. Else log warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
list
``[skid1, skid2, skid3, ...]``
See Also
--------
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
:func:`pymaid.get_annotated`
Use to retrieve entities (neurons and annotations).
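Examples
--------
Annotations below are placeholders:
>>> skids = pymaid.get_skids_by_annotation('glomerulus DA1')
>>> # Neurons with the first annotation but NOT the second
>>> skids = pymaid.get_skids_by_annotation(['glomerulus DA1', '~uPN'],
...                                        intersect=True)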
"""
remote_instance = utils._eval_remote_instance(remote_instance)
annotations = utils._make_iterable(annotations)
pos_an = [an for an in annotations if not an.startswith('~')]
neg_an = [an[1:] for an in annotations if an.startswith('~')]
# Placeholders in case we don't even ask for pos or neg
pos_ids = {}
neg_ids = {}
if pos_an:
pos_ids = get_annotation_id(pos_an,
raise_not_found=raise_not_found,
allow_partial=allow_partial,
remote_instance=remote_instance)
if neg_an:
neg_ids = get_annotation_id(neg_an,
raise_not_found=raise_not_found,
allow_partial=allow_partial,
remote_instance=remote_instance)
# Collapse for intersection...
if intersect:
annotation_post = [{'annotated_with[{}]'.format(i): v for i, v in enumerate(list(pos_ids.values()))}]
annotation_post[0].update({'not_annotated_with[{}]'.format(i): v for i, v in enumerate(list(neg_ids.values()))})
# ... or keep separate for no intersection
else:
annotation_post = [{'annotated_with': an} for an in pos_ids.values()]
annotation_post += [{'not_annotated_with': an} for an in neg_ids.values()]
# Need to clear empties
annotation_post = [p for p in annotation_post if p]
# Query server
remote_annotated_url = [remote_instance._get_annotated_url() for _ in annotation_post]
resp = remote_instance.fetch(remote_annotated_url, post=annotation_post)
# Extract skids from responses
annotated_skids = [e['skeleton_ids'][0] for r in resp for e in r['entities'] if e['type'] == 'neuron']
# Remove duplicates
annotated_skids = list(set(annotated_skids))
logger.debug('Found {} neurons with matching annotation(s)'.format(len(annotated_skids)))
return annotated_skids
@cache.undo_on_error
def neuron_exists(x, remote_instance=None):
"""Check if neurons exist in CATMAID.
Parameters
----------
x
Neurons to check if they exist in Catmaid. Can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
bool :
True if skeleton exists, False if not. If multiple
neurons are queried, returns a dict
``{skid1: True, skid2: False, ...}``
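Examples
--------
Skeleton IDs below are placeholders:
>>> pymaid.neuron_exists(16)
>>> # Multiple neurons return a dict
>>> pymaid.neuron_exists([16, 201])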
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
if len(x) > 1:
return {n: neuron_exists(n, remote_instance=remote_instance) for n in x}
else:
x = x[0]
remote_get_neuron_name = remote_instance._get_single_neuronname_url(x)
response = remote_instance.fetch(remote_get_neuron_name)
if 'error' in response:
return False
else:
return True
@cache.undo_on_error
def get_node_info(x, remote_instance=None):
"""Retrieve info for a set of nodes.
Parameters
----------
x : CatmaidNeuron | CatmaidNeuronList | list of node IDs
Single or list of node IDs. If CatmaidNeuron/List,
details for all its nodes are requested.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas DataFrame
DataFrame in which each row represents a queried node::
node_id neuron_name skeleton_id skeleton_name neuron_id
0
1
...
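Examples
--------
Node IDs below are placeholders:
>>> info = pymaid.get_node_info([123456, 123457])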
"""
remote_instance = utils._eval_remote_instance(remote_instance)
node_ids = utils.eval_node_ids(x, connectors=False, nodes=True)
urls = [remote_instance._get_single_node_info_url(tn) for tn in node_ids]
data = remote_instance.fetch(urls, desc='Get info')
df = pd.DataFrame([[node_ids[i]] + list(n.values()) for i, n in enumerate(data)],
columns=['node_id'] + list(data[0].keys())
)
return df
@cache.undo_on_error
def get_node_tags(node_ids, node_type, remote_instance=None):
"""Retrieve tags for a set of nodes OR connectors.
Parameters
----------
node_ids
Single or list of node or connector IDs.
node_type : 'NODE' | 'CONNECTOR'
Set which node type of IDs you have provided as they
use different API endpoints!
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
dictionary containing tags for each node:
``{'nodeID': ['tag1', 'tag2', ...], 'nodeID' : [...], ...}``
Examples
--------
>>> pymaid.get_node_tags(['6626578', '6633237'],
... 'NODE',
... remote_instance)
{'6633237': ['ends'], '6626578': ['ends']}
See Also
--------
:func:`pymaid.add_tags`
Use to add tags to nodes.
:func:`pymaid.delete_tags`
Use to delete node tags.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(node_ids, (list, np.ndarray)):
node_ids = [node_ids]
# Make sure node_ids are strings
node_ids = [str(n) for n in node_ids]
url = remote_instance._get_node_labels_url()
if node_type in ['TREENODE', 'TREENODES', 'NODES', 'NODE']:
key = 'treenode_ids'
elif node_type in ['CONNECTOR', 'CONNECTORS']:
key = 'connector_ids'
else:
raise TypeError(f'Unknown node_type parameter: {node_type}')
POST = {key: ','.join([str(tn) for tn in node_ids])}
return remote_instance.fetch(url, post=POST)
@cache.undo_on_error
def get_segments(x, remote_instance=None):
"""Retrieve list of segments for a neuron just like the review widget.
Parameters
----------
x
Neurons to retrieve. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
list
List of node IDs, ordered by length. If multiple neurons
are requested, returns a dict ``{skid: [], ...}``.
See Also
--------
``CatmaidNeuron.segments``
``CatmaidNeuron.short_segments``
Use these :class:`pymaid.CatmaidNeuron` attributes to access
segments generated by pymaid (faster).
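Examples
--------
The skeleton ID below is a placeholder:
>>> segs = pymaid.get_segments(16)
>>> # Segments are ordered by length, longest first
>>> longest = segs[0]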
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
urls = []
post_data = []
for s in x:
urls.append(remote_instance._get_review_details_url(s))
# For some reason this needs to be fetched as POST (even though actual
# POST data is not necessary)
post_data.append({'placeholder': 0})
rdata = remote_instance.fetch(urls, post=post_data, desc='Get segs')
if len(x) > 1:
return {x[i]: [[tn['id'] for tn in arb['sequence']] for arb in rdata[i]] for i in range(len(x))}
else:
return [[tn['id'] for tn in arb['sequence']] for arb in rdata[0]]
@cache.undo_on_error
def get_review_details(x, remote_instance=None):
"""Retrieve review status (reviewer + timestamp) by node for given neuron.
Parameters
----------
x
Neurons to get review-details for. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas DataFrame
DataFrame in which each row represents a node::
node_id skeleton_id reviewer1 reviewer2 reviewer3
0 12345 12345123 datetime NaT datetime
1
...
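Examples
--------
Skeleton ID and reviewer login below are placeholders:
>>> det = pymaid.get_review_details(16)
>>> # Nodes reviewed by a given user (columns are reviewer logins)
>>> mine = det[det['schlegelp'].notnull()]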
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
node_list = []
urls = []
post_data = []
for s in x:
urls.append(remote_instance._get_review_details_url(s))
# For some reason this needs to be fetched as POST (even though actual
# POST data is not necessary)
post_data.append({'placeholder': 0})
rdata = remote_instance.fetch(urls,
post=post_data,
desc='Get rev stats')
for i, neuron in enumerate(rdata):
# There is a small chance that nodes are counted twice but not
# tracking node_id speeds up this extraction a LOT
# node_ids = []
for arbor in neuron:
node_list += [(n['id'], x[i], n['rids'])
for n in arbor['sequence'] if n['rids']]
tn_to_skid = {n[0]: n[1] for n in node_list}
node_dict = {n[0]: {u[0]: datetime.datetime.strptime(
u[1][:16], '%Y-%m-%dT%H:%M') for u in n[2]} for n in node_list}
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
df = pd.DataFrame.from_dict(node_dict, orient='index').fillna(np.nan)
df.columns = df.columns.map(user_dict)
df['skeleton_id'] = [tn_to_skid[tn] for tn in df.index.values]
df.index.name = 'node_id'
df = df.reset_index(drop=False)
# Make sure we didn't count nodes twice
df = df[~df.duplicated('node_id')]
return df
@cache.undo_on_error
def get_logs(operations=[], entries=50, display_start=0, search="",
remote_instance=None):
"""Retrieve logs (same data as in log widget).
Parameters
----------
operations : list of str, optional
If empty, all operations will be queried from the server.
Possible operations: 'join_skeleton',
'change_confidence', 'rename_neuron', 'create_neuron',
'create_skeleton', 'remove_neuron', 'split_skeleton',
'reroot_skeleton', 'reset_reviews', 'move_skeleton'
entries : int, optional
Number of entries to retrieve.
display_start : int, optional
Sets range of entries to return:
``display_start`` to ``display_start + entries``.
search : str, optional
Use to filter results for e.g. a specific skeleton ID
or neuron name.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a single operation::
user operation timestamp x y z explanation
0
1
...
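Examples
--------
>>> # The 50 most recent skeleton merges
>>> merges = pymaid.get_logs(operations=['join_skeleton'])
>>> # Filter for a (placeholder) skeleton ID
>>> logs = pymaid.get_logs(search='16')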
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not operations:
operations = [-1]
elif not isinstance(operations, (list, np.ndarray)):
operations = [operations]
logs = []
for op in operations:
get_logs_postdata = {'sEcho': 6,
'iColumns': 7,
'iDisplayStart': display_start,
'iDisplayLength': entries,
'mDataProp_0': 0,
'sSearch_0': '',
'bRegex_0': False,
'bSearchable_0': False,
'bSortable_0': True,
'mDataProp_1': 1,
'sSearch_1': '',
'bRegex_1': False,
'bSearchable_1': False,
'bSortable_1': True,
'mDataProp_2': 2,
'sSearch_2': '',
'bRegex_2': False,
'bSearchable_2': False,
'bSortable_2': True,
'mDataProp_3': 3,
'sSearch_3': '',
'bRegex_3': False,
'bSearchable_3': False,
'bSortable_3': False,
'mDataProp_4': 4,
'sSearch_4': '',
'bRegex_4': False,
'bSearchable_4': False,
'bSortable_4': False,
'mDataProp_5': 5,
'sSearch_5': '',
'bRegex_5': False,
'bSearchable_5': False,
'bSortable_5': False,
'mDataProp_6': 6,
'sSearch_6': '',
'bRegex_6': False,
'bSearchable_6': False,
'bSortable_6': False,
'sSearch': '',
'bRegex': False,
'iSortCol_0': 2,
'sSortDir_0': 'desc',
'iSortingCols': 1,
'self.project_id': remote_instance.project_id,
'operation_type': op,
'search_freetext': search}
remote_get_logs_url = remote_instance._get_logs_url()
logs += remote_instance.fetch(remote_get_logs_url,
post=get_logs_postdata)['aaData']
df = pd.DataFrame(logs,
columns=['user', 'operation', 'timestamp',
'x', 'y', 'z', 'explanation']
)
df['timestamp'] = [datetime.datetime.strptime(
d[:16], '%Y-%m-%dT%H:%M') for d in df['timestamp'].values]
return df
@cache.undo_on_error
def get_contributor_statistics(x, separate=False, max_threads=500,
remote_instance=None):
"""Retrieve contributor statistics for given skeleton ids.
By default, stats are given over all neurons.
Parameters
----------
x
Neurons to get contributor stats for. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
separate : bool, optional
If True, stats are given per neuron.
max_threads : int, optional
Maximum parallel data requests. Overrides
``CatmaidInstance.max_threads``.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame or pandas.Series
Series, if ``separate=False``. DataFrame, if ``separate=True``::
skeleton_id node_contributors multiuser_review_minutes ..
1
2
3
post_contributors construction_minutes min_review_minutes ..
1
2
3
n_postsynapses n_presynapses pre_contributors n_nodes ..
1
2
3
review_contributors
1
2
3
Examples
--------
>>> # Plot contributions as pie chart
>>> import matplotlib.pyplot as plt
>>> cont = pymaid.get_contributor_statistics("annotation:uPN right")
>>> plt.subplot(131, aspect=1)
>>> ax1 = plt.pie(cont.node_contributors.values(),
... labels=cont.node_contributors.keys(),
... autopct='%.0f%%' )
>>> plt.subplot(132, aspect=1)
>>> ax2 = plt.pie(cont.pre_contributors.values(),
... labels=cont.pre_contributors.keys(),
... autopct='%.0f%%' )
>>> plt.subplot(133, aspect=1)
>>> ax3 = plt.pie(cont.post_contributors.values(),
... labels=cont.post_contributors.keys(),
... autopct='%.0f%%' )
>>> plt.show()
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
columns = ['skeleton_id', 'n_nodes', 'node_contributors', 'n_presynapses',
'pre_contributors', 'n_postsynapses', 'post_contributors',
'review_contributors', 'multiuser_review_minutes',
'construction_minutes', 'min_review_minutes']
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
if not separate:
with config.tqdm(total=len(x), desc='Contr. stats',
disable=config.pbar_hide,
leave=config.pbar_leave) as pbar:
stats = []
for j in range(0, len(x), max_threads):
pbar.update(j)
get_statistics_postdata = {}
for i in range(j, min(len(x), j + max_threads)):
key = 'skids[%i]' % i
get_statistics_postdata[key] = x[i]
remote_get_statistics_url = remote_instance._get_contributions_url()
stats.append(remote_instance.fetch(remote_get_statistics_url,
post=get_statistics_postdata))
# Now generate DataFrame
def _merge_contrib(key):
"""Sum per-user contributions of the given type across all chunks."""
return {user_dict.get(int(u)): sum(st[key][u] for st in stats if u in st[key]) for st in stats for u in st[key]}
node_contributors = _merge_contrib('node_contributors')
pre_contributors = _merge_contrib('pre_contributors')
post_contributors = _merge_contrib('post_contributors')
review_contributors = _merge_contrib('review_contributors')
df = pd.Series([
x,
sum([st['n_nodes'] for st in stats]),
node_contributors,
sum([st['n_pre'] for st in stats]),
pre_contributors,
sum([st['n_post'] for st in stats]),
post_contributors,
review_contributors,
sum([st['multiuser_review_minutes'] for st in stats]),
sum([st['construction_minutes'] for st in stats]),
sum([st['min_review_minutes'] for st in stats])
],
index=columns,
dtype=object
)
else:
get_statistics_postdata = [{'skids[0]': s} for s in x]
remote_get_statistics_url = [
remote_instance._get_contributions_url() for s in x]
stats = remote_instance.fetch(remote_get_statistics_url,
post=get_statistics_postdata,
desc='Get contrib.')
df = pd.DataFrame([[
s,
stats[i]['n_nodes'],
{user_dict.get(int(u)): stats[i]['node_contributors'][u]
for u in stats[i]['node_contributors']},
stats[i]['n_pre'],
{user_dict.get(int(u)): stats[i]['pre_contributors'][u]
for u in stats[i]['pre_contributors']},
stats[i]['n_post'],
{user_dict.get(int(u)): stats[i]['post_contributors'][u]
for u in stats[i]['post_contributors']},
{user_dict.get(int(u)): stats[i]['review_contributors'][u]
for u in stats[i]['review_contributors']},
stats[i]['multiuser_review_minutes'],
stats[i]['construction_minutes'],
stats[i]['min_review_minutes']
] for i, s in enumerate(x)],
columns=columns,
dtype=object
)
return df
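# Hedged usage sketch (added for illustration, not part of the original API):
# rank node contributors from the Series returned by
# get_contributor_statistics(). Assumes a global CatmaidInstance is set up.
def _example_top_node_contributors(x, top_n=5):
"""Return the ``top_n`` (login, node count) pairs for the given neurons."""
cont = get_contributor_statistics(x)
ranked = sorted(cont.node_contributors.items(), key=lambda kv: kv[1], reverse=True)
return ranked[:top_n]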
@cache.undo_on_error
def get_history(start_date=(datetime.date.today() - datetime.timedelta(days=7)).isoformat(),
end_date=datetime.date.today().isoformat(), split=True,
remote_instance=None):
"""Retrieves CATMAID project history.
If the time window is too large, the connection might time out, which
results in an error. Keep ``split=True`` (the default) to avoid this.
Parameters
----------
start_date : datetime | str | tuple, optional, default=last week
dates can be either:
- ``datetime.date``
- ``datetime.datetime``
- str ``'YYYY-MM-DD'``, e.g. ``'2016-03-09'``
- tuple ``(YYYY, MM, DD)``, e.g. ``(2016, 3, 9)``
end_date : datetime | str | tuple, optional, default=today
See start_date.
split : bool, optional
If True, history will be requested in bouts of 6 months.
Useful if you want to look at a very big time window
as this can lead to gateway timeout.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.Series
A pandas.Series with the following entries::
{
cable : DataFrame containing cable created in nm.
Rows = users, columns = dates
connector_links : DataFrame containing connector links created.
Rows = users, columns = dates
reviewed : DataFrame containing nodes reviewed.
Rows = users, columns = dates
user_details : user-list (see pymaid.get_user_list())
nodes : DataFrame containing nodes created by user.
}
Examples
--------
>>> import matplotlib.pyplot as plt
>>> hist = pymaid.get_history()
>>> # Plot cable created by all users over time
>>> hist.cable.T.plot()
>>> plt.show()
>>> # Collapse users and plot sum of cable over time
>>> hist.cable.sum(0).plot()
>>> plt.show()
>>> # Plot a single user's cable (indexed by user login name)
>>> hist.cable.loc['schlegelp'].T.plot()
>>> plt.show()
>>> # Sum up cable created this week by all users
>>> hist.cable.values.sum()
>>> # Get number of active (non-zero) users
>>> active_users = hist.cable.astype(bool).sum(axis=0)
See Also
--------
:func:`~pymaid.get_user_stats`
Returns a summary of user stats as table.
:func:`~pymaid.plot_history`
Quick way to plot history over time.
"""
def _constructor_helper(data, key, days):
""" Helper to extract variable from data returned by CATMAID server
"""
temp = []
for d in days:
try:
temp.append(data[d][key])
except BaseException:
temp.append(0)
return temp
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(start_date, datetime.date):
start_date = start_date.isoformat()
elif isinstance(start_date, datetime.datetime):
start_date = start_date.isoformat()[:10]
elif isinstance(start_date, (tuple, list)):
start_date = datetime.date(start_date[0], start_date[
1], start_date[2]).isoformat()
if isinstance(end_date, datetime.date):
end_date = end_date.isoformat()
elif isinstance(end_date, datetime.datetime):
end_date = end_date.isoformat()[:10]
elif isinstance(end_date, (tuple, list)):
end_date = datetime.date(end_date[0], end_date[
1], end_date[2]).isoformat()
rounds = []
if split:
start = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
end = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
logger.info(
'Retrieving %i days of history in bouts!' % (end - start).days)
# First make big bouts of roughly 6 months each
while start < (end - datetime.timedelta(days=6 * 30)):
rounds.append((start.isoformat(),
(start + datetime.timedelta(days=6 * 30)).isoformat()))
start += datetime.timedelta(days=6 * 30)
# Append the last bit
if start < end:
rounds.append((start.isoformat(), end.isoformat()))
else:
rounds = [(start_date, end_date)]
data = []
for r in config.tqdm(rounds, desc='Retrieving history',
disable=config.pbar_hide, leave=config.pbar_leave):
get_history_GET_data = {'pid': remote_instance.project_id,
'start_date': r[0],
'end_date': r[1]
}
remote_get_history_url = remote_instance._get_history_url()
remote_get_history_url += f'?{urllib.parse.urlencode(get_history_GET_data)}'
logger.debug(f'Retrieving user history from {r[0]} to {r[1]}.')
data.append(remote_instance.fetch(remote_get_history_url))
# Now merge data into a single dict
stats = dict(data[0])
for d in data:
stats['days'] += [e for e in d['days'] if e not in stats['days']]
stats['daysformatted'] += [e for e in d['daysformatted']
if e not in stats['daysformatted']]
for u in d['stats_table']:
stats['stats_table'][u].update(d['stats_table'][u])
user_list = get_user_list(remote_instance=remote_instance).set_index('id')
user_list.index = user_list.index.astype(str)
user_dict = user_list.login.to_dict()
user_index = [user_dict.get(u, f'Anonymous{i}') for i, u in enumerate(stats['stats_table'])]
dates = pd.to_datetime([datetime.datetime.strptime(d, '%Y%m%d').date() for d in stats['days']])
def _table(key):
"""Assemble a users x dates DataFrame for the given stats key."""
return pd.DataFrame([_constructor_helper(stats['stats_table'][u], key, stats['days']) for u in stats['stats_table']], index=user_index, columns=dates)
df = pd.Series([_table('new_cable_length'),
_table('new_treenodes'),
_table('new_connectors'),
_table('new_reviewed_nodes'),
user_list.reset_index(drop=True)],
index=['cable', 'nodes', 'connector_links',
'reviewed', 'user_details']
)
return df
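# Hedged usage sketch (illustrative addition): number of active users per day,
# derived from the history Series documented above. Assumes a global
# CatmaidInstance.
def _example_active_users_per_day(start_date, end_date):
"""Return a Series mapping date -> number of users with non-zero cable."""
hist = get_history(start_date=start_date, end_date=end_date)
return hist.cable.astype(bool).sum(axis=0)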
@cache.undo_on_error
def get_nodes_in_volume(*x, coord_format='NM', resolution=(4, 4, 50),
remote_instance=None):
"""Retrieve nodes and connectors in given bounding box.
Please note that there is a cap on the number of nodes returned which is
hard-wired into the CATMAID server's settings.
Parameters
----------
*x
Coordinates defining the bounding box. Can be
either:
- 1d list of coordinates: left, right, top, bottom, z1, z2
- 2d list of coordinates: [[left, right], [top, bottom], [z1, z2]]
- pymaid.Volume
Can be given in nm or pixels.
coord_format : str, optional
Define whether provided coordinates are in
nanometer ('NM') or in pixels/slices ('PIXEL').
resolution : tuple of floats, optional
x/y/z resolution in nm [default = (4, 4, 50)]
Used to transform to nm if limits are given in
pixels.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
nodes : pandas.DataFrame
DataFrame in which each row is a node::
node_id parent_id x y z confidence radius skeleton_id edition_time user_id
0
1
2
connectors : pandas.DataFrame
DataFrame in which each row is a connector::
connector_id x y z confidence edition_time user_id partners
0
1
2
``partners`` are lists of::
[node_id, relation_id, link_confidence, link_edition_time, link_id]
truncated : bool
If True, lists are truncated due to node limit reached.
relation_map : dict
Map for ``relation_id`` in connector's ``partner`` column.
Examples
--------
Get (truncated) lists of nodes and connectors in the bounding box of the AL:
>>> al = pymaid.get_volume('AL_R')
>>> nodes, connectors, truncated, relation_map = pymaid.get_nodes_in_volume(al)
>>> truncated
True
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(x[0], ns.Volume):
x = x[0].bbox
# Flatten the list of coordinates
coords = np.array(x).flatten()
if coords.shape[0] != 6:
raise ValueError('Must provide 6 coordinates (left, right, top, '
'bottom, z1, z2), got {}'.format(coords.shape[0]))
# Extract coords
left, right, top, bottom, z1, z2 = coords
# Set resolution to 1:1 if coordinates are already in nm
if coord_format == 'NM':
resolution = (1, 1, 1)
remote_nodes_list = remote_instance._get_node_list_url()
node_list_postdata = {
'left': left * resolution[0],
'right': right * resolution[0],
'top': top * resolution[1],
'bottom': bottom * resolution[1],
'z1': z1 * resolution[2],
'z2': z2 * resolution[2],
# Atnid seems to be related to fetching the active node too
# -> will be ignored if atnid = -1
'atnid': -1,
'labels': False,
# 'limit': 3500, # this doesn't do anything -> hard wired into server settings
}
node_data = remote_instance.fetch(remote_nodes_list,
post=node_list_postdata)
tn = pd.DataFrame(node_data[0],
columns=['node_id', 'parent_id',
'x', 'y', 'z', 'confidence',
'radius', 'skeleton_id',
'edition_time', 'user_id'])
# Fix parent ID
tn.loc[tn.parent_id.isnull(), 'parent_id'] = -1
tn['parent_id'] = tn.parent_id.astype(int).astype(object)
tn.loc[tn.parent_id < 0, 'parent_id'] = None
tn['edition_time'] = pd.to_datetime(tn.edition_time, unit='s', utc=True)
cn = pd.DataFrame(node_data[1],
columns=['connector_id', 'x', 'y', 'z',
'confidence', 'edition_time',
'user_id', 'partners'])
cn['edition_time'] = pd.to_datetime(cn.edition_time, unit='s', utc=True)
node_limit_reached = node_data[3]
relation_map = node_data[4]
return tn, cn, node_limit_reached, relation_map
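# Hedged usage sketch (illustrative addition): query a pixel-space bounding
# box and warn if the server truncated the result. The coordinates below are
# hypothetical.
def _example_nodes_in_pixel_box():
"""Fetch nodes/connectors in an example pixel bounding box."""
nodes, connectors, truncated, relation_map = get_nodes_in_volume(
[[14000, 15000], [5000, 6000], [100, 120]],
coord_format='PIXEL', resolution=(4, 4, 40))
if truncated:
logger.warning('Node limit reached - consider a smaller bounding box')
return nodes, connectors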
@cache.undo_on_error
def find_neurons(names=None, annotations=None, volumes=None, users=None,
from_date=None, to_date=None, reviewed_by=None, skids=None,
intersect=False, partial_match=False, only_soma=False,
min_size=1, minimum_cont=None, remote_instance=None):
"""Find neurons matching given search criteria.
Warning
-------
Depending on the parameters, this can take quite a while! Also: by default,
will return single-node neurons! Use the ``min_size`` parameter to change
that behaviour.
Parameters
----------
names : str | list of str
Neuron name(s) to search for.
annotations : str | list of str
Annotation(s) to search for.
volumes : str | navis.Volume | list of either
CATMAID volume(s) to look into. This uses
:func:`~pymaid.get_neurons_in_volumes` and will look
for neurons within the **bounding box** of given
volume(s).
users : int | str | list of either, optional
User ID(s) (int) or login(s) (str).
reviewed_by : int | str | list of either, optional
User ID(s) (int) or login(s) (str) of reviewer.
from_date : datetime | list of integers, optional
Format: [year, month, day]. Return neurons created
after this date. This works ONLY if also querying by
``users`` or ``reviewed_by``!
to_date : datetime | list of integers, optional
Format: [year, month, day]. Return neurons created
before this date. This works ONLY if also querying by
``users`` or ``reviewed_by``!
skids : list of skids, optional
Can be a list of skids, a CatmaidNeuronList or pandas
DataFrame with "skeleton_id" column.
intersect : bool, optional
If multiple search criteria are provided, neurons have
to meet all of them in order to be returned. This
is first applied WITHIN search criteria (works for
multiple ``annotations``, ``volumes``, ``users`` and
``reviewed_by``) and then ACROSS critera!
partial_match : bool, optional
If True, partial matches for *names* AND *annotations*
are allowed.
minimum_cont : int, optional
If looking for specific ``users``: minimum contribution
(in nodes) to a neuron in order for it to be counted.
Only applicable if ``users`` is provided. If multiple
users are provided contribution is calculated across
all users. Minimum contribution does NOT take start
and end dates into account! This is applied AFTER
intersecting!
min_size : int, optional
Minimum size (in nodes) for neurons to be returned.
The lower this value, the longer it will take to
filter.
only_soma : bool, optional
If True, only neurons with a soma are returned.
remote_instance : CatmaidInstance
If not passed directly, will try using globally
defined CatmaidInstance.
Returns
-------
:class:`~pymaid.CatmaidNeuronList`
Examples
--------
>>> # Simple request for neurons with given annotations
>>> to_find = ['glomerulus DA1', 'glomerulus DL4']
>>> skids = pymaid.find_neurons(annotations=to_find)
>>> # Get only neurons that have both annotations
>>> skids = pymaid.find_neurons(annotations=to_find, intersect=True)
>>> # Get all neurons with more than 1000 nodes
>>> skids = pymaid.find_neurons(min_size=1000)
>>> # Get all neurons that have been traced recently by given user
>>> skids = pymaid.find_neurons(users='schlegelp',
... from_date=[2017, 10, 1])
>>> # Get all neurons traced by a given user within a certain volume
>>> skids = pymaid.find_neurons(users='schlegelp',
... minimum_cont=1000,
... volumes='LH_R')
"""
remote_instance = utils._eval_remote_instance(remote_instance)
# First, we have to prepare a whole lot of parameters
if users:
users = utils.eval_user_ids(users, remote_instance=remote_instance)
if reviewed_by:
reviewed_by = utils.eval_user_ids(
reviewed_by, remote_instance=remote_instance)
if annotations and not isinstance(annotations, (list, np.ndarray)):
annotations = [annotations]
if names and not isinstance(names, (list, np.ndarray)):
names = [names]
if volumes and not isinstance(volumes, (list, np.ndarray)):
volumes = [volumes]
# Bring dates into the correct format
if from_date and not to_date:
today = datetime.date.today()
to_date = (today.year, today.month, today.day)
elif to_date and not from_date:
from_date = (1900, 1, 1)
if isinstance(from_date, datetime.date):
from_date = [from_date.year, from_date.month, from_date.day]
if isinstance(to_date, datetime.date):
to_date = [to_date.year, to_date.month, to_date.day]
# Warn if from/to_date are used without also querying by user or reviewer
if from_date and not (users or reviewed_by):
logger.warning('Start/End dates can only be used for queries against '
'<users> or <reviewed_by>')
# Now go over all parameters and get sets of skids
sets_of_skids = []
if not isinstance(skids, type(None)):
skids = utils.eval_skids(skids, remote_instance=remote_instance)
sets_of_skids.append(set(skids))
# Get skids by name
if names:
urls = [remote_instance._get_annotated_url() for n in names]
post_data = [{'name': str(n),
'with_annotations': False,
'name_exact': not partial_match}
for n in names]
results = remote_instance.fetch(urls,
post=post_data,
desc='Get names')
this_name = []
for i, r in enumerate(results):
for e in r['entities']:
if partial_match and e['type'] == 'neuron' and names[i].lower() in e['name'].lower():
this_name.append(e['skeleton_ids'][0])
if not partial_match and e['type'] == 'neuron' and e['name'] == names[i]:
this_name.append(e['skeleton_ids'][0])
sets_of_skids.append(set(this_name))
# Get skids by annotation
if annotations:
annotation_ids = get_annotation_id(annotations,
allow_partial=partial_match,
remote_instance=remote_instance)
if not annotation_ids:
raise Exception('No matching annotation(s) found!')
if partial_match is True:
logger.debug('Found {0} id(s) (partial matches '
'included)'.format(len(annotation_ids)))
else:
logger.debug('Found id(s): %s | Unable to retrieve: %i' % (
str(annotation_ids), len(annotations) - len(annotation_ids)))
urls = [remote_instance._get_annotated_url() for an in annotation_ids]
post_data = [{'annotated_with': str(an), 'with_annotations': 'false'}
for an in annotation_ids.values()]
results = remote_instance.fetch(urls,
post=post_data,
desc='Get annot')
annotated = [set([e['skeleton_ids'][0] for e in res['entities'] if e['type'] == 'neuron']) for res in results]
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*annotated))
else:
sets_of_skids.append(set.union(*annotated))
# Get skids by user
if users:
urls = [remote_instance._get_list_skeletons_url() for u in users]
GET_data = [{'nodecount_gt': min_size - 1,
'created_by': u} for u in users]
if from_date and to_date:
dates = {'from': ''.join(['{0:02d}'.format(d) for d in from_date]),
'to': ''.join(['{0:02d}'.format(d) for d in to_date])}
GET_data = [{**d, **dates} for d in GET_data]
urls = [u + '?%s' % urllib.parse.urlencode(g) for u, g in zip(urls, GET_data)]
results = remote_instance.fetch(urls, desc='Get users')
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*[set(res) for res in results]))
else:
sets_of_skids.append(set.union(*[set(res) for res in results]))
# Get skids by reviewer
if reviewed_by:
urls = [remote_instance._get_list_skeletons_url() for u in reviewed_by]
GET_data = [{'nodecount_gt': min_size - 1,
'reviewed_by': u} for u in reviewed_by]
if from_date and to_date:
dates = {'from': ''.join(['{0:02d}'.format(d) for d in from_date]),
'to': ''.join(['{0:02d}'.format(d) for d in to_date])}
GET_data = [{**d, **dates} for d in GET_data]
urls = [u + '?%s' % urllib.parse.urlencode(g) for u, g in zip(urls, GET_data)]
results = remote_instance.fetch(urls, desc='Get reviewers')
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*[set(res) for res in results]))
else:
sets_of_skids.append(set.union(*[set(res) for res in results]))
# Get by volume
if volumes:
temp = []
for v in config.tqdm(volumes, desc='Get by vols',
disable=config.pbar_hide,
leave=config.pbar_leave):
if not isinstance(v, ns.Volume):
vol = get_volume(v, remote_instance)
else:
vol = v
temp.append(set(get_neurons_in_bbox(vol.bbox,
remote_instance=remote_instance)))
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*temp))
else:
sets_of_skids.append(set.union(*temp))
# Get neurons by size only if min_size and no other parameters were
# provided
if all(param is None for param in [names, annotations, volumes,
users, reviewed_by, skids]):
# Make sure people don't accidentally request ALL neurons in the
# dataset
if min_size <= 1:
answer = ""
while answer not in ["y", "n"]:
answer = input("Your search parameters will retrieve ALL "
"neurons in the dataset. Proceed? "
"[Y/N] ").lower()
if answer != 'y':
logger.info('Query cancelled')
return
logger.info(
'Get all neurons with >= {0} nodes'.format(min_size))
get_skeleton_list_GET_data = {'nodecount_gt': min_size - 1}
remote_get_list_url = remote_instance._get_list_skeletons_url()
remote_get_list_url += '?%s' % urllib.parse.urlencode(
get_skeleton_list_GET_data)
these_neurons = set(remote_instance.fetch(remote_get_list_url))
sets_of_skids.append(these_neurons)
# Now intersect/merge ACROSS search criteria
if intersect:
logger.info('Intersecting by search parameters')
skids = list(set.intersection(*sets_of_skids))
else:
skids = list(set.union(*sets_of_skids))
# Filtering by size was already done for users and reviewed_by and dates
# If we queried by annotations, names or volumes we need to do this
# explicitly here
if min_size > 1 and (volumes or annotations or names):
logger.info('Filtering neurons for size')
get_skeleton_list_GET_data = {'nodecount_gt': min_size - 1}
remote_get_list_url = remote_instance._get_list_skeletons_url()
remote_get_list_url += '?%s' % urllib.parse.urlencode(
get_skeleton_list_GET_data)
neurons_by_size = set(remote_instance.fetch(remote_get_list_url))
skids = set.intersection(set(skids), neurons_by_size)
nl = core.CatmaidNeuronList(list(skids), remote_instance=remote_instance)
if only_soma:
hs = has_soma(nl, return_ids=False, remote_instance=remote_instance)
nl = core.CatmaidNeuronList([n for n in nl if hs[int(n.skeleton_id)]])
if users and minimum_cont:
nl.get_skeletons(skip_existing=True)
nl = core.CatmaidNeuronList([n for n in nl if n.nodes[n.nodes.creator_id.isin(users)].shape[0] >= minimum_cont],
remote_instance=remote_instance)
if nl.empty:
logger.warning(
'No neurons matching the search parameters were found')
else:
logger.info(f'Found {len(nl)} neurons matching the search parameters')
nl.get_names()
return nl
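# Hedged usage sketch (illustrative addition): combine several search criteria
# with intersect=True, as described in the docstring above. The annotation and
# reviewer names are hypothetical.
def _example_find_reviewed_neurons():
"""Find neurons that carry an annotation AND were reviewed by a user."""
return find_neurons(annotations='glomerulus DA1',
reviewed_by='schlegelp',
intersect=True,
min_size=500)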
@cache.undo_on_error
def get_neurons_in_volume(volumes, min_nodes=2, min_cable=1, intersect=False,
only_soma=False, remote_instance=None):
"""Retrieves neurons with processes within CATMAID volumes.
This function uses the **BOUNDING BOX** around volume as proxy and queries
for neurons that are within that volume. See examples on how to work
around this.
Warning
-------
Depending on the number of nodes in that volume, this can take quite a
while! Also: by default, will NOT return single-node neurons - use the
``min_nodes`` parameter to change that behaviour.
Parameters
----------
volumes : str | navis.Volume | list of either
Single or list of CATMAID volumes.
min_nodes : int, optional
Minimum node count for a neuron within given
volume(s).
min_cable : int, optional
Minimum cable length [nm] for a neuron within
given volume(s).
intersect : bool, optional
If multiple volumes are provided, this parameter
determines if neurons have to be in all of the
volumes or just a single.
only_soma : bool, optional
If True, only neurons with a soma will be returned.
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
list
``[skeleton_id, skeleton_id, ...]``
See Also
--------
:func:`~pymaid.get_partners_in_volume`
Get only partners that make connections within a
given volume.
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
Examples
--------
>>> # Get a volume
>>> lh = pymaid.get_volume('LH_R')
>>> # Get neurons within the bounding box of a volume
>>> skids = pymaid.get_neurons_in_volume(lh, min_nodes=10)
>>> # Retrieve 3D skeletons of these neurons
>>> lh_neurons = pymaid.get_neurons(skids)
>>> # Prune by volume
>>> lh_pruned = lh_neurons.copy()
>>> lh_pruned.prune_by_volume(lh)
>>> # Filter neurons with more than 100um of cable in the volume
>>> n = lh_neurons[lh_pruned.cable_length > 100]
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(volumes, (list, np.ndarray)):
volumes = [volumes]
for i, v in enumerate(volumes):
if not isinstance(v, ns.Volume):
volumes[i] = get_volume(v)
neurons = []
for v in volumes:
logger.info('Retrieving neurons in volume {0}'.format(v.name))
temp = get_neurons_in_bbox(v, min_nodes=min_nodes,
min_cable=min_cable,
remote_instance=remote_instance)
if not intersect:
neurons += list(temp)
else:
neurons += [temp]
if intersect:
# Keep only neurons that show up in every volume
neurons = list(set.intersection(*[set(l) for l in neurons]))
# Need to do this in case we have several volumes
neurons = list(set(neurons))
if only_soma:
soma = has_soma(neurons, remote_instance=remote_instance)
neurons = [n for n in neurons if soma[n] is True]
logger.info('Done. {0} unique neurons found in volume(s) '
'{1}'.format(len(neurons),
','.join([v.name for v in volumes])))
return neurons
@cache.undo_on_error
def get_neurons_in_bbox(bbox, unit='NM', min_nodes=1, min_cable=1,
remote_instance=None, **kwargs):
"""Retrieve neurons with processes within a defined box volume.
Parameters
----------
bbox : list-like | dict | navis.Volume
Coordinates of the bounding box. Can be either:
1. List/np.array: ``[[left, right], [top, bottom], [z1, z2]]``
2. Dictionary ``{'left': int|float, 'right': ..., ...}``
unit : 'NM' | 'PIXEL'
Unit of your coordinates. Attention:
'PIXEL' will also assume that Z1/Z2 is in slices.
By default, xyz resolution of 4x4x40nm per pixel
is assumed. Pass e.g. ``res=[8, 8, 40]`` as keyword
argument to override this.
min_nodes : int, optional
Minimum node count for a neuron within given
bounding box.
min_cable : int, optional
Minimum cable length [nm] for a neuron within
given bounding box.
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
list
``[skeleton_id, skeleton_id, ...]``
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(bbox, ns.Volume):
bbox = bbox.bbox
if isinstance(bbox, dict):
bbox = np.array([[bbox['left'], bbox['right']],
[bbox['top'], bbox['bottom']],
[bbox['z1'], bbox['z2']]
])
bbox = np.asarray(bbox)
if bbox.shape == (2, 3):
bbox = bbox.T
if unit == 'PIXEL':
res = np.asarray(kwargs.get('res', [4, 4, 40])).reshape(3, 1)
bbox *= res
url = remote_instance._get_skeletons_in_bbox(minx=min(bbox[0]),
maxx=max(bbox[0]),
miny=min(bbox[1]),
maxy=max(bbox[1]),
minz=min(bbox[2]),
maxz=max(bbox[2]),
min_nodes=min_nodes,
min_cable=min_cable)
return remote_instance.fetch(url)
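# Hedged usage sketch (illustrative addition): the dict form of ``bbox``
# accepted above, with hypothetical nanometer coordinates.
def _example_neurons_in_dict_bbox():
"""Fetch skeleton IDs from a bounding box given as a dictionary."""
box = {'left': 380000, 'right': 400000,
'top': 150000, 'bottom': 170000,
'z1': 100000, 'z2': 110000}
return get_neurons_in_bbox(box, unit='NM', min_nodes=10)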
@cache.undo_on_error
def get_user_list(remote_instance=None):
"""Get list of users.
Parameters
----------
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a user::
id login full_name first_name last_name color
0
1
...
Examples
--------
>>> user_list = pymaid.get_user_list()
>>> # To search for e.g. user ID 22
>>> user_list.set_index('id', inplace=True)
>>> user_list.loc[22]
id 22
login mustermannm
full_name Michaela Mustermann
first_name Michaela
last_name Mustermann
color [0.91389, 0.877853, 1.0]
>>> user_list.reset_index(inplace=True)
>>> # To convert into a classic dict
>>> d = user_list.set_index('id').T.to_dict()
>>> d[22]['first_name']
'Michaela'
"""
remote_instance = utils._eval_remote_instance(remote_instance)
user_list = remote_instance.fetch(remote_instance._get_user_list_url())
# It appears that for public CATMAID instances (like VFB) where the users
# are masked, the user-list endpoint can return just a single dictionary
# instead of a list of dicts.
if isinstance(user_list, dict):
user_list = [user_list]
# The user list can contain different entries
# Here we define alternative field names
columns = [('id', 'userid'),
('login', 'username'),
('full_name', 'long_name'),
('first_name', ),
('last_name', ),
('color', )]
data = []
for user in user_list:
row = []
for col in columns:
value = None
for key in col:
if key in user:
value = user[key]
break
row.append(value)
data.append(row)
df = pd.DataFrame(data, columns=[c[0] for c in columns])
df.sort_values(['login', 'id'], inplace=True)
df.reset_index(inplace=True, drop=True)
return df
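# Hedged usage sketch (illustrative addition): the id -> login mapping used
# repeatedly throughout this module can be built from get_user_list() like so.
def _example_user_dict():
"""Return a {user_id: login} dictionary."""
user_list = get_user_list()
return user_list.set_index('id').login.to_dict()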
@cache.undo_on_error
def get_paths(sources, targets, n_hops=2, min_synapses=1, return_graph=False,
remove_isolated=False, remote_instance=None):
"""Fetch paths between two sets of neurons.
Parameters
----------
sources
Source neurons.
targets
Target neurons. ``sources`` and ``targets`` can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation as e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
n_hops : int | list | range, optional
Number of hops allowed between sources and
targets. Direct connection would be 1 hop.
1. int, e.g. ``n_hops=3`` will return paths with
EXACTLY 3 hops
2. list, e.g. ``n_hops=[2,4]`` will return all
paths with 2 and 4 hops
3. range, e.g. ``n_hops=range(2,4)`` will be converted
to a list and return paths with 2 and 3 hops.
min_synapses : int, optional
Minimum number of synapses between source and target.
return_graph : bool, optional
If True, will return NetworkX Graph (see below).
remove_isolated : bool, optional
Remove isolated nodes from NetworkX Graph. Only
relevant if ``return_graph=True``.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
paths : list
List of skeleton IDs that constitute paths from
sources to targets::
[[source1, ..., target1], [source2, ..., target2], ...]
networkx.DiGraph
Only if ``return_graph=True``. Graph contains all neurons that
connect sources and targets. **Important**: Does only contain
edges that connect sources and targets via max ``n_hops``!
Other edges have been removed.
Examples
--------
>>> # This assumes that you have already set up a CatmaidInstance
>>> import networkx as nx
>>> import matplotlib.pyplot as plt
>>> g, paths = pymaid.get_paths(['annotation:glomerulus DA1'],
... ['2333007'])
>>> g
<networkx.classes.digraph.DiGraph at 0x127d12390>
>>> paths
[['57381', '4376732', '2333007'], ['57323', '630823', '2333007'], ...
>>> nx.draw(g)
>>> plt.show()
"""
remote_instance = utils._eval_remote_instance(remote_instance)
sources = utils.eval_skids(sources, remote_instance=remote_instance)
targets = utils.eval_skids(targets, remote_instance=remote_instance)
targets = utils._make_iterable(targets).astype(int)
sources = utils._make_iterable(sources).astype(int)
if isinstance(n_hops, (int, np.integer)):
n_hops = [n_hops]
if not utils._is_iterable(n_hops):
raise TypeError('Expected `n_hops` to be iterable or integer, got '
f'"{type(n_hops)}"')
if min(n_hops) <= 0:
raise ValueError('n_hops must not be <= 0')
# We need to query two endpoints:
# first, get the neurons involved
response = []
url = remote_instance._get_graph_dps_url()
for h in range(1, max(n_hops) + 1):
if h == 1:
response += list(sources) + list(targets)
continue
post_data = {
'n_hops': h,
'min_synapses': min_synapses
}
for i, s in enumerate(sources):
post_data['sources[%i]' % i] = s
for i, t in enumerate(targets):
post_data['targets[%i]' % i] = t
# Response is just a set of skeleton IDs
response += remote_instance.fetch(url, post=post_data)
# Get unique edges
skids = np.unique(np.asarray(response).astype(int))
# Now get edges between those neurons
edges = get_edges(skids, remote_instance=remote_instance)
# Turn neurons into an NetworkX graph
g = ns.network2nx(edges, threshold=min_synapses)
# Get all paths between sources and targets
all_paths = [p for s in sources for t in targets for p in
nx.all_simple_paths(g, s, t,
cutoff=max(n_hops)) if len(p) - 1 in n_hops]
if not return_graph:
return all_paths
# Turn into edges
edges_to_keep = set([e for l in all_paths for e in nx.utils.pairwise(l)])
# Remove edges
g.remove_edges_from([e for e in g.edges if e not in edges_to_keep])
if remove_isolated:
# Remove isolated nodes
g.remove_nodes_from(list(nx.isolates(g)))
return all_paths, g
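# Hedged usage sketch (illustrative addition): tally paths by hop count from
# the output of get_paths(). Source/target IDs are hypothetical.
def _example_count_paths_by_hops(sources, targets, n_hops=range(1, 4)):
"""Return {hops: number of paths} between two sets of neurons."""
paths = get_paths(sources, targets, n_hops=list(n_hops))
counts = {}
for p in paths:
counts[len(p) - 1] = counts.get(len(p) - 1, 0) + 1
return counts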
@cache.undo_on_error
def get_volume(volume_name=None, color=(120, 120, 120, .6), combine_vols=False,
remote_instance=None):
"""Retrieves volume (mesh).
Parameters
----------
volume_name : int | str | list of str or int
Name(s) (as ``str``) or ID (as ``int``) of the volume
to import. Names must be EXACT!
If ``volume_name=None``, will return list of all
available CATMAID volumes. If list of volume names,
will return a dictionary ``{name: Volume, ... }``
color : tuple, optional
(R, G, B, alpha) values used by :func:`~pymaid.plot3d`.
combine_vols : bool, optional
If True and multiple volumes are requested, they will
be combined into a single volume.
remote_instance : CATMAIDInstance, optional
If not passed directly, will try using global.
Returns
-------
navis.Volume
If ``volume_name`` is list of volumes, returns a dictionary of
Volumes: ``{name1: Volume1, name2: Volume2, ...}``
Examples
--------
>>> import pymaid
>>> rm = pymaid.CatmaidInstance('server_url', 'api_token', 'http_user', 'http_pw')
>>> # Retrieve volume
>>> vol = pymaid.get_volume('LH_R')
>>> # Plot volume
>>> vol.plot3d()
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(volume_name, type(None)):
logger.info('Retrieving list of available volumes.')
elif not isinstance(volume_name, (int, str, list, np.ndarray)):
raise TypeError('Volume name must be id (int), str or list of either, not {}.'.format(type(volume_name)))
volume_names = utils._make_iterable(volume_name)
# First, get volume IDs
get_volumes_url = remote_instance._get_volumes()
response = remote_instance.fetch(get_volumes_url)
all_vols = pd.DataFrame(response['data'], columns=response['columns'])
if isinstance(volume_name, type(None)):
return all_vols
req_vols = all_vols[(all_vols.name.isin(volume_names)) |
(all_vols.id.isin(volume_names))]
volume_ids = req_vols.id.values
if len(volume_ids) < len(volume_names):
not_found = set(volume_names).difference(set(all_vols.name) |
set(all_vols.id))
raise Exception(
'No volume(s) found for: {}'.format(','.join(not_found)))
url_list = [remote_instance._get_volume_details(v) for v in volume_ids]
# Get data
responses = remote_instance.fetch(url_list, desc='Volumes')
# Generate volume(s) from responses
volumes = {}
for r in responses:
mesh_str = r['mesh']
mesh_name = r['name']
mesh_id = r['id']
mesh_type = re.search('<(.*?) ', mesh_str).group(1)
# Now reverse engineer the mesh
if mesh_type == 'IndexedTriangleSet':
t = re.search("index='(.*?)'", mesh_str).group(1).split(' ')
faces = [(int(t[i]), int(t[i + 1]), int(t[i + 2]))
for i in range(0, len(t) - 2, 3)]
v = re.search("point='(.*?)'", mesh_str).group(1).split(' ')
vertices = [(float(v[i]), float(v[i + 1]), float(v[i + 2]))
for i in range(0, len(v) - 2, 3)]
elif mesh_type == 'IndexedFaceSet':
# For this type, each face is indexed and an index of -1 indicates
# the end of this face set
t = re.search("coordIndex='(.*?)'", mesh_str).group(1).split(' ')
faces = []
this_face = []
for f in t:
if int(f) != -1:
this_face.append(int(f))
else:
faces.append(this_face)
this_face = []
# Make sure the last face is also appended
faces.append(this_face)
v = re.search("point='(.*?)'", mesh_str).group(1).split(' ')
vertices = [(float(v[i]), float(v[i + 1]), float(v[i + 2]))
for i in range(0, len(v) - 2, 3)]
else:
logger.error("Unknown volume type: %s" % mesh_type)
raise Exception("Unknown volume type: %s" % mesh_type)
# In this format vertices are not unique - i.e. a given vertex defined
# by its x/y/z position shows up as many times as it participates in
# a face.
# Fortunately, navis.Volume being a subclass of trimesh.Trimesh takes
# care of the deduplication
v = ns.Volume(name=mesh_name,
volume_id=mesh_id,
vertices=vertices,
faces=faces,
color=color)
volumes[mesh_name] = v
# Return just the volume if a single one was requested
if len(volumes) == 1:
return list(volumes.values())[0]
# Honour ``combine_vols`` (documented above but previously unused); this
# assumes navis' ``Volume.combine``, which merges meshes into one volume
if combine_vols:
return ns.Volume.combine(list(volumes.values()))
return volumes
@cache.undo_on_error
def get_annotation_list(remote_instance=None):
"""Get a list of all annotations in the project.
Parameters
----------
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas DataFrame
DataFrame in which each row represents an annotation::
name id users
0
1
...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
an = remote_instance.fetch(remote_instance._get_annotation_list())[
'annotations']
df = pd.DataFrame.from_dict(an)
return df
def url_to_coordinates(coords, stack_id, active_skeleton_id=None,
active_node_id=None, zoom=0, tool='tracingtool',
open_browser=False, remote_instance=None):
"""Generate URL to a location.
Parameters
----------
coords : list | np.ndarray | pandas.DataFrame
``x``, ``y``, ``z`` coordinates.
stack_id : int | list/array of ints
ID of the image stack you want to link to.
Depending on your setup this parameter might be
overridden by local user settings.
active_skeleton_id : int | list/array of ints, optional
Skeleton ID of the neuron that should be selected.
active_node_id : int | list/array of ints, optional
Node/Connector ID of the node that should be
active.
zoom : int, optional
tool : str, optional
open_browser : bool, optional
If True will open *all* generated URLs as new
tabs in the standard webbrowser.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
str | list of str
URL(s) to the coordinates provided.
Examples
--------
>>> # Get URL for a single coordinate
>>> url = pymaid.url_to_coordinates([1000, 1000, 1000], stack_id=5)
>>> # Get URLs for all low-confidence nodes of a neuron
>>> n = pymaid.get_neuron(27295)
>>> low_c = n.nodes.loc[n.nodes.confidence < 5]
>>> urls = pymaid.url_to_coordinates(low_c[['x', 'y', 'z']].values,
... stack_id=5,
... active_node_id=low_c.node_id.values)
"""
def gen_url(c, stid, nid, sid):
""" This function generates the actual urls
"""
GET_data = {'pid': remote_instance.project_id,
'xp': int(c[0]),
'yp': int(c[1]),
'zp': int(c[2]),
'tool': tool,
'sid0': stid,
's0': zoom
}
if sid:
GET_data['active_skeleton_id'] = sid
if nid:
GET_data['active_node_id'] = nid
return remote_instance.make_url('?%s' % urllib.parse.urlencode(GET_data))
def list_helper(x):
""" Helper function to turn variables into lists matching length of coordinates
"""
if not isinstance(x, (list, np.ndarray)):
return [x] * len(coords)
elif len(x) != len(coords):
raise ValueError('Parameters must be the same shape as coords.')
else:
return x
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(coords, (pd.DataFrame, pd.Series)):
try:
coords = coords[['x', 'y', 'z']].values
except BaseException:
raise ValueError(
'Pandas DataFrames must have "x","y" and "z" columns.')
elif isinstance(coords, list):
coords = np.array(coords)
if isinstance(coords, np.ndarray) and coords.ndim > 1:
stack_id = list_helper(stack_id)
active_skeleton_id = list_helper(active_skeleton_id)
active_node_id = list_helper(active_node_id)
urls = [gen_url(c, stid, nid, sid) for c, stid, nid, sid in zip(coords, stack_id, active_node_id, active_skeleton_id)]
if open_browser:
for u in urls:
webbrowser.open_new_tab(u)
return urls
else:
url = gen_url(coords, stack_id, active_node_id, active_skeleton_id)
if open_browser:
webbrowser.open_new_tab(url)
return url
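# Hedged usage sketch (illustrative addition): generate CATMAID links for all
# soma-tagged nodes of a neuron. Assumes this module's get_neuron() and a
# 'soma' node tag; the stack ID is hypothetical.
def _example_soma_urls(skeleton_id, stack_id=5):
"""Return URLs pointing at every node tagged 'soma' in a neuron."""
n = get_neuron(skeleton_id)
soma_nodes = n.nodes[n.nodes.node_id.isin(n.tags.get('soma', []))]
return url_to_coordinates(soma_nodes[['x', 'y', 'z']],
stack_id=stack_id,
active_node_id=soma_nodes.node_id.values,
active_skeleton_id=skeleton_id)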
@cache.undo_on_error
def get_node_location(x, sort=True, remote_instance=None):
"""Retrieves location for a set of nodes or connectors.
Parameters
----------
x : int | list of int
Node ID(s).
sort : bool, optional
If True, will sort returned DataFrame to be in the same
order as input data.
remote_instance : CatmaidInstance, optional
If not provided, will search for globally defined
remote instance.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
node_id x y z
0
1
...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_node_ids(x, connectors=True, nodes=True)
url = remote_instance._get_node_location_url()
post = {'node_ids[{}]'.format(i): n for i, n in enumerate(x)}
data = remote_instance.fetch(url, post=post)
df = pd.DataFrame(data, columns=['node_id', 'x', 'y', 'z'])
if sort:
df = df.set_index('node_id').loc[x].reset_index(drop=False)
return df
@cache.undo_on_error
def get_label_list(remote_instance=None):
"""Retrieves all labels (node tags only) in a project.
Parameters
----------
remote_instance : CatmaidInstance, optional
If not provided, will search for globally defined
remote instance.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a label::
label_id tag skeleton_id node_id
0
1
...
Examples
--------
>>> # Get all labels
>>> labels = pymaid.get_label_list()
>>> # Get all nodes with a given tag
>>> nodes = labels[labels.tag == 'my_label'].node_id
>>> # Get neuron that have at least a single node with a given tag
>>> neurons = labels[ labels.tag == 'my_label' ].skeleton_id.unique()
"""
remote_instance = utils._eval_remote_instance(remote_instance)
labels = remote_instance.fetch(remote_instance._get_label_list_url())
return pd.DataFrame(labels, columns=['label_id', 'tag', 'skeleton_id',
'node_id'])
@cache.undo_on_error
def get_transactions(range_start=None, range_length=25, remote_instance=None):
"""Retrieve individual transactions with server.
**This API endpoint is extremely slow!**
Parameters
----------
range_start : int, optional
Start of table. Transactions are returned in
chronological order (most recent transactions first)
range_length : int, optional
End of table. If None, will return all.
remote_instance : CatmaidInstance, optional
If not provided, will search for globally defined
CatmaidInstance.
Returns
-------
pandas.DataFrame
DataFrame listing individual transactions::
change_type execution_time label ...
0 Backend 2017-12-26 03:37:00 labels.update ...
1 Backend 2017-12-26 03:37:00 treenodes.create ...
2 Backend 2017-12-26 03:37:00 treenodes.create ...
3 Backend 2017-12-26 03:37:00 treenodes.create ...
4 Backend 2017-12-26 03:32:00 treenodes.create ...
project_id transaction_id user_id user
0 1 404899166 151 dacksa
1 1 404899165 151 dacksa
2 1 404899164 151 dacksa
3 1 404899163 151 dacksa
4 1 404899162 151 dacksa
"""
remote_instance = utils._eval_remote_instance(remote_instance)
remote_transactions_url = remote_instance._get_transactions_url()
desc = {'range_start': range_start, 'range_length': range_length}
desc = {k: v for k, v in desc.items() if v is not None}
remote_transactions_url += '?%s' % urllib.parse.urlencode(desc)
data = remote_instance.fetch(remote_transactions_url)
df = pd.DataFrame.from_dict(data['transactions'])
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
df['user'] = df.user_id.map(user_dict)
df['execution_time'] = [datetime.datetime.strptime(
d[:16], '%Y-%m-%dT%H:%M') for d in df['execution_time'].values]
return df
@cache.undo_on_error
def get_neuron_id(x, remote_instance=None):
"""Get neuron ID(s) for given skeleton(s).
Parameters
----------
x : list-like | CatmaidNeuron/List
Skeleton IDs for which to get neuron IDs.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{skeleton_id (str): neuron_id (int), ... }``
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_neuron_ids_url()
post = {'model_ids[{}]'.format(i): s for i, s in enumerate(skids)}
resp = remote_instance.fetch(url, post=post)
return resp
@cache.undo_on_error
def get_cable_lengths(x, chunk_size=500, remote_instance=None):
"""Get cable lengths directly from Catmaid Server.
Parameters
----------
x : list-like | CatmaidNeuron/List
Skeleton IDs for which to get cable lengths.
chunk_size : int, optional
Retrieves cable in chunks of given size.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{skeleton_id (str): cable [nm] (int), ... }``
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_neuron_cable_url()
cable = {}
for i in config.trange(0, len(skids), int(chunk_size),
desc='Fetching chunks'):
chunk = skids[i: i + chunk_size]
post = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(chunk)}
resp = remote_instance.fetch(url, post=post)
cable.update(resp)
return cable
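# Hedged usage sketch (illustrative addition): total cable length in
# micrometers for a set of neurons, using the chunked fetch above.
def _example_total_cable_um(x):
"""Return summed cable [um] for the given skeleton IDs."""
cable = get_cable_lengths(x)
return sum(int(v) for v in cable.values()) / 1000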
@cache.undo_on_error
def get_connectors_in_bbox(bbox, unit='NM', limit=None, restrict_to=False,
ret='COORDS', remote_instance=None, **kwargs):
"""Retrieves connectors within given bounding box.
Parameters
----------
bbox : list-like | dict | pymaid.Volume
Coordinates of the bounding box. Can be either:
1. List/np.array: ``[[left, right], [top, bottom], [z1, z2]]``
2. Dictionary ``{'left': int|float, 'right': ..., ...}``
unit : 'NM' | 'PIXEL'
Unit of your coordinates. Attention:
'PIXEL' will also assume that Z1/Z2 is in slices.
By default, an X/Y resolution of 4nm and a Z
resolution of 40nm is assumed. Pass 'xy_res' and
'z_res' as ``**kwargs`` to override this.
limit : int, optional
Limit the number of connectors returned.
restrict_to : list, optional
List of skeleton IDs to return connectors for.
ret : 'IDS' |'COORDS' | 'LINKS'
Connector data to be returned. See below for
explanation.
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
If ``ret="COORDS"`` (default): DataFrame in which each row
represents a connector::
connector_id x y z
0
1
..
list
If ``ret="IDS"``: list of connector IDs.
pandas.DataFrame
If ``ret="LINKS"``: DataFrame in which each row represents a
connector. Please note that connectors can show up multiple times
- once for each link::
connector_id x y z skeleton confidence creator_id ..
0
1
..
.. connected_node creation_time edition_time relation_id
0
1
..
"""
if ret.upper() not in ['IDS', 'COORDS', 'LINKS']:
raise ValueError('"ret" must be "IDS", "COORDS" or "LINKS"')
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(bbox, ns.Volume):
bbox = bbox.bbox
if isinstance(bbox, dict):
bbox = np.array([[bbox['left'], bbox['right']],
[bbox['top'], bbox['bottom']],
[bbox['z1'], bbox['z2']]
])
if not isinstance(bbox, np.ndarray):
bbox = np.array(bbox)
if unit == 'PIXEL':
bbox[[0, 1], :] = bbox[[0, 1], :] * kwargs.get('xy_res', 4)
bbox[[2], :] = bbox[[2], :] * kwargs.get('z_res', 40)
url = remote_instance._get_connector_in_bbox_url()
post = dict(minx=min(bbox[0]),
maxx=max(bbox[0]),
miny=min(bbox[1]),
maxy=max(bbox[1]),
minz=min(bbox[2]),
maxz=max(bbox[2]),
limit=limit if limit else 0
)
if ret.upper() in ['COORDS', 'LINKS']:
# post['with_links'] = True
post['with_locations'] = True
if ret.upper() == 'LINKS':
post['with_links'] = True
if restrict_to:
restrict_to = utils._make_iterable(restrict_to)
post.update({'skeleton_ids[{}]'.format(i): s for i, s in enumerate(restrict_to)})
data = remote_instance.fetch(url, post=post)
if ret.upper() == 'IDS':
return data
data = pd.DataFrame(data)
if ret.upper() == 'COORDS':
data.columns = ['connector_id', 'x', 'y', 'z']
else:
data.columns = ['connector_id', 'x', 'y', 'z', 'skeleton',
'confidence', 'creator_id', 'connected_node',
'creation_time', 'edition_time', 'relation_id']
return data
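# Hedged usage sketch (illustrative addition): count connector links per
# skeleton inside a box using ret='LINKS'. Coordinates are assumed to be
# in nm.
def _example_connectors_per_skeleton(bbox):
"""Return a Series mapping skeleton ID -> number of connectors in ``bbox``."""
links = get_connectors_in_bbox(bbox, unit='NM', ret='LINKS')
return links.groupby('skeleton').connector_id.nunique()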
@cache.undo_on_error
def get_connectivity_counts(x, source_relations=['presynaptic_to'],
target_relations=['postsynaptic_to'],
count_partner_links=True, remote_instance=None):
"""Fetch number of connections of a given type for a set of neurons.
Parameters
----------
x : list-like | CatmaidNeuron/List
Skeleton IDs for which to get connectivity counts.
source_relations : str | list of str, optional
A list of pre-connector relations.
target_relations : str | list of str, optional
A list of post-connector relations. Default
settings count the number of outgoing connections
for the input neurons.
count_partner_links : bool, optional
Whether to count partner links or links
to a connector.
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Examples
--------
>>> # Get the count of all outgoing connections (default):
>>> counts = pymaid.get_connectivity_counts('annotation:glomerulus DA1')
>>> # Get both incoming and outgoing connections:
>>> counts = pymaid.get_connectivity_counts('annotation:glomerulus DA1',
... source_relations=['presynaptic_to',
... 'postsynaptic_to'],
... target_relations=['postsynaptic_to',
... 'presynaptic_to'])
Returns
-------
dict
Dictionary with server response::
{'connectivity': {skid1: {relation_ID: count},
skid2: {relation_ID: count}},
'relations': {relation_ID: relation_name}}
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_connectivity_counts_url()
source_relations = utils._make_iterable(source_relations)
target_relations = utils._make_iterable(target_relations)
post = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(skids)}
post.update({'source_relations[{}]'.format(i): s for i, s in enumerate(source_relations)})
post.update({'target_relations[{}]'.format(i): t for i, t in enumerate(target_relations)})
return remote_instance.fetch(url, post=post)
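# Hedged usage sketch (illustrative addition): translate the raw relation IDs
# in the response documented above into human-readable relation names.
def _example_readable_connectivity_counts(x):
"""Return {skeleton_id: {relation_name: count}}."""
resp = get_connectivity_counts(x)
relations = resp['relations']
return {skid: {relations[r]: n for r, n in counts.items()}
for skid, counts in resp['connectivity'].items()}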
@cache.undo_on_error
def get_import_info(x, with_nodes=False, chunk_size=500, remote_instance=None):
"""Get count of imported nodes for given neuron(s).
Parameters
----------
x : list-like | CatmaidNeuron/List
Skeleton IDs for which to get import info.
with_nodes : bool, optional
Whether to include IDs of all imported nodes.
chunk_size : int, optional
Retrieves data in chunks of this size.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_import_info_url()
info = {}
for i in config.trange(0, len(skids), int(chunk_size),
desc='Fetching info'):
chunk = skids[i: i + chunk_size]
post = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(chunk)}
post['with_treenodes'] = with_nodes
resp = remote_instance.fetch(url, post=post)
info.update(resp)
return info
@cache.undo_on_error
def get_origin(x, chunk_size=500, remote_instance=None):
"""Get origin of given neuron(s).
Parameters
----------
x : list-like | CatmaidNeuron/List
Skeleton IDs for which to get their origin.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
{'data_sources': {'1': {'name': None,
'source_project_id': 1,
'url': 'https://.../tracing/fafb/v14-seg-li-190805.0'}},
'origins': {'13348203': {'data_source_id': 1, 'source_id': 13348108}}}
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_skeleton_origin_url()
post = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(skids)}
resp = remote_instance.fetch(url, post=post)
return resp
@cache.undo_on_error
def get_skids_by_origin(source_ids, source_url, source_project_id,
remote_instance=None):
"""Get skeleton IDs by origin.
Parameters
----------
source_ids : list of int
Source IDs to search for.
source_url : str
Source url to search for.
source_project_id : int
Source project ID to search for.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
{'source_id': skeleton_id}
"""
remote_instance = utils._eval_remote_instance(remote_instance)
source_ids = utils._make_iterable(source_ids)
url = remote_instance._get_skeleton_by_origin_url()
post = {'source_ids[{}]'.format(i): s for i, s in enumerate(source_ids)}
post['source_url'] = source_url
post['source_project_id'] = source_project_id
resp = remote_instance.fetch(url, post=post)
return resp
@cache.undo_on_error
def get_sampler(x=None, remote_instance=None):
"""Get list of reconstruction samplers.
Parameters
----------
x : list-like | CatmaidNeuron/List | None, optional
Skeleton IDs for which to get samplers. If ``None``,
all samplers are returned.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame containing all samplers. Returns empty
DataFrame if no samplers.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(x, type(None)):
url = remote_instance._get_sampler_list_url()
else:
skids = utils.eval_skids(x, remote_instance=remote_instance)
GET = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(skids)}
url = remote_instance._get_sampler_list_url(**GET)
resp = remote_instance.fetch(url)
if not resp:
return pd.DataFrame([])
# Turn into DataFrame
df = pd.DataFrame.from_records(resp)
# Convert timestamps
df['creation_time'] = pd.to_datetime(df.creation_time, unit='s', utc=True)
df['edition_time'] = pd.to_datetime(df.edition_time, unit='s', utc=True)
return df
@cache.undo_on_error
def get_sampler_domains(sampler, remote_instance=None):
"""Get list of domains for given sampler.
Parameters
----------
sampler : int
ID of sampler to fetch domains for.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame containing domains for given sampler.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
url = remote_instance._get_sampler_domains_url(sampler)
resp = remote_instance.fetch(url)
# Turn into DataFrame
df = pd.DataFrame.from_records(resp)
# Convert timestamps
df['creation_time'] = pd.to_datetime(df.creation_time, unit='s', utc=True)
df['edition_time'] = pd.to_datetime(df.edition_time, unit='s', utc=True)
return df
@cache.undo_on_error
def get_sampler_counts(x, remote_instance=None):
"""Get number of reconstruction samplers for a set of neurons.
Parameters
----------
x : list-like | CatmaidNeuron/List
Skeleton IDs for which to get sampler counts.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{skeleton_id: count, ...}``
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_sampler_counts_url()
post = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(skids)}
resp = remote_instance.fetch(url, post=post)
return resp
@cache.undo_on_error
def get_skeleton_change(x, chunk_size=50, remote_instance=None):
"""Get split and merge history of skeletons.
Parameters
----------
x : list-like | CatmaidNeuron/List
Skeleton IDs for which to get split/merge history.
chunk_size : int, optional
Change history will be queried in chunks. Reduce
the number if you experience problems.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
list
List of unique skeleton paths in historic order,
newest last::
[[skid1, skid2, ..., skidN], ...]
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
change = []
with config.tqdm(desc='Fetching change', total=len(skids),
disable=config.pbar_hide,
leave=config.pbar_leave) as pbar:
chunks = [skids[i:i + chunk_size] for i in range(0, len(skids), chunk_size)]
for ch in chunks:
# Generate URLs
GET = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(ch)}
url = remote_instance._get_skeleton_change_url(**GET)
# Fetch data
resp = remote_instance.fetch(url, disable_pbar=True)
change += resp
# Update progress bar
pbar.update(len(ch))
return change
|
schlegelp/pymaid
|
pymaid/fetch.py
|
Python
|
gpl-3.0
| 183,875
|
[
"NEURON"
] |
1381ecec4c0caa885779d462850eb903d0101dca9c3eaeab0c1ce8d9c2a01740
|
'''
|marx| offers several different source shapes. Tests in this module exercise
those sources (except ``SAOSAC``, which is heavily used in
:ref:`sect-tests.PSF` already).
'''
import shutil
import subprocess
import os
from collections import OrderedDict
from marxtest import base
title = 'Sources in |marx|'
tests = ['GeometricSources', 'ImageSource',
#'RayfileSource',
'UserSource']
class GeometricSources(base.MarxTest):
'''This test exercises built-in |marx| sources with different geometric
shapes.
Most source types have parameters, and not all parameters are tested here.
See :ref:`sect-sourcemodels` for a detailed description of source
parameters.
'''
title = 'Built-in geometric sources'
figures = OrderedDict([('ds9', {'alternative': 'Six PSFs.',
'caption': '`ds9`_ image of the simulated PSFs in alphabetical order (beta distribution, disk, disk with hole, Gauss, line, and point).'})
])
@base.Marx
def step_10(self):
return [{'OutputDir': 'point'},
{'SourceType': 'GAUSS', 'S-GaussSigma': 20,
'OutputDir': 'gauss'},
{'SourceType': 'BETA', 'S-BetaCoreRadius': 10,
'S-BetaBeta': 0.6, 'OutputDir': 'beta'},
{'SourceType': 'DISK',
'S-DiskTheta0': 0, 'S-DiskTheta1': 20,
'OutputDir': 'disk'},
{'SourceType': 'DISK',
'S-DiskTheta0': 10, 'S-DiskTheta1': 20,
'OutputDir': 'diskhole'},
{'SourceType': 'LINE', 'S-LinePhi': 45, 'S-LineTheta': 30,
'OutputDir': 'line'},
]
# more to come for SAOSAC, RAYFILE, SIMPUT, USER
# but first make something work here
@base.Marx2fits
def step_20(self):
dirs = ['point', 'gauss', 'beta', 'disk', 'diskhole',
'line']
return ['--pixadj=EDSER'] * len(dirs), dirs, [d + '.fits' for d in dirs]
@base.Ciao
def step_30(self):
'''ds9 images of the PSF'''
return ['''ds9 -width 800 -height 500 -log -cmap heat *.fits -pan to 4018 4141 physical -match frame wcs -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
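# Note (illustrative, not part of the original test): each dict returned by
# step_10 maps directly onto |marx| command-line parameters, so the 'gauss'
# entry corresponds roughly to running:
#   marx SourceType=GAUSS S-GaussSigma=20 OutputDir=gauss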
class ImageSource(base.MarxTest):
'''An image can be used as |marx| input. In this case, the intensity of the
    X-ray radiation on the sky is taken to be proportional to the value of the
image at that point.
'''
title = 'Image as source'
figures = OrderedDict([('ds9', {'alternative': 'The simulated events generally follow the input image, but with significant noise because of the short observation time.',
'caption': '`ds9`_ shows the input image (left) and the simulated event list (right).'})
])
@base.Python
def step_0(self):
'''Make input image
In this example we use python to make a simple image as input.
We setup a 3-d box and fill it with an emitting shell. We then
integrate along one dimension to obtain a collapsed image.
Physically, this represents the thin shell of a supernova
explosion.
'''
import numpy as np
from astropy.wcs import WCS
from astropy.io import fits
# Actually to make this run faster, we'll do only one quadrant here
cube = np.zeros((201, 201, 201))
mg = np.mgrid[0: 201., 0:201, 0:201 ]
d = np.sqrt(mg[0, :, :, :]**2 + mg[1, :, :, :]**2 + mg[2, :, :, :]**2)
cube[(d > 160.) & (d < 170)] = 1
im = cube.sum(axis=0)
# Now rotate and put the four quarters together
image = np.zeros((401, 401))
image[:201, :201] = np.fliplr(np.flipud(im))
image[:201, 200:] = np.flipud(im)
image[200:, :201] = np.fliplr(im)
image[200:, 200:] = im
# Create a new WCS object.
w = WCS(naxis=2)
w.wcs.crpix = [100., 100.]
# Pixel size of our image shall be 1 arcsec
w.wcs.cdelt = [1. / 3600., 1. / 3600.]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
# Now, write out the WCS object as a FITS header
header = w.to_header()
# header is an astropy.io.fits.Header object. We can use it to create a new
# PrimaryHDU and write it to a file.
hdu = fits.PrimaryHDU(header=header, data=image)
# Save to FITS file
hdu.writeto(os.path.join(self.basepath, 'input_image.fits'), clobber=True)
@base.Marx
def step_1(self):
'''Run |marx|.
We run a monoenergetic simulation here for the Si XIII line at 6.65 Ang.
'''
return {'SourceType': "IMAGE", 'S-ImageFile': 'input_image.fits',
'MinEnergy': 1.9, 'MaxEnergy': 1.9, 'GratingType': 'NONE',
'OutputDir': 'image'}
@base.Marx2fits
def step_2(self):
return '--pixadj=EDSER', 'image', 'image.fits'
@base.Ciao
def step_30(self):
'''ds9 images of the PSF'''
return ['''ds9 -width 800 -height 500 -log -cmap heat input_image.fits image.fits -pan to 4018 4141 physical -zoom 0.5 -sleep 1 -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
class RayfileSource(base.MarxTest):
'''|marx| is a Monte-Carlo code, thus the exact distribution of photons
on the sky will be different every time the code is run. Sometimes it
can be useful to generate a list of photons with position, time and
energy from the source on the sky and then "observe" the exact same list
with different instrument configurations so that any differences in the
result are only due to the different configuration and not to random
fluctuations in the source.
In this example, we look at a relatively large, diffuse emission region
with a very soft spectrum (for simplicity we are using a flat spectrum).
We compare simulations using ACIS-S and ACIS-I. ACIS-S has a better
response to soft photons, but some parts of the source may not be in the
field-of-view; ACIS-I is less efficient for soft photons, but has a
larger field-of-view.
'''
title = 'Using a RAYFILE source'
    figures = OrderedDict([('ds9', {'alternative': 'As described above, ACIS-S shows more photons, but ACIS-I does include more of the wings of the Gaussian source distribution',
'caption': '`ds9`_ shows the ACIS-I (left) and ACIS-S image (right). Both sources are generated from the same photon list. Sometimes the same pattern of photons can be seen in both images, but with a few events missing on ACIS-I due to the lower soft response.'})
])
@base.Marx
def step_1(self):
'''Write ray file
'''
return {'SourceType': 'GAUSS', 'S-GaussSigma': 300,
'DumpToRayFile': 'yes', 'MinEnergy': 0.3, 'MaxEnergy': 0.5}
@base.Marx
def step_2(self):
'''ACIS-S'''
return {'SourceType': 'RAYFILE', 'RayFile': 'marx.output',
'OutputDir': 'aciss', 'DetectorType': 'ACIS-S'}
@base.Marx
def step_3(self):
'''ACIS-I'''
return {'SourceType': 'RAYFILE', 'RayFile': 'marx.output',
'OutputDir': 'acisi', 'DetectorType': 'ACIS-I'}
@base.Marx2fits
def step_4(self):
'''Turn into fits files
We use the ``EXACT`` setting here to make the comparison simpler.
The default EDSER (energy-dependent sub-pixel event repositioning)
shifts photons of the same energy by a different amount for ACIS-S and
ACIS-I, which would make it harder to compare the resulting images.
'''
return ['--pixadj=EXACT', '--pixadj=EXACT'], ['acisi', 'aciss'], ['i.fits', 's.fits']
@base.Ciao
def step_30(self):
'''ds9 images of the PSF'''
return ['''ds9 -width 800 -height 500 -log -cmap heat i.fits s.fits -pan to 4018 4141 physical -match frame wcs -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
class SimputSource(base.MarxTest):
pass
class UserSource(base.MarxTest):
'''Run an example for a USER source.
    |marx| comes with several examples of user-written sources in C.
These can be compiled as shared objects and dynamically linked into |marx|
at run time.
To test this, we copy one of the source files from the installed |marx|
version and compile it with gcc. This particular case is not very useful,
because |marx| already has a point source with the same properties
build-in. The purpose of this test is only to have an automatic check that
the dynamic linking works.
'''
title = 'Compiling a USER source'
figures = OrderedDict([('ds9', {'alternative': 'A point source',
                           'caption': '`ds9`_ shows that the distribution of the source is indeed a point source.'})
])
@base.Python
def step_1(self):
'''compile USER code
|marx| ships with a few examples of user sources. We pick one
    of them, copy the needed files to the right directory and compile the source with gcc.
'''
marxpath = self.conf.get('marx', 'path')
src = os.path.join(marxpath,
'share', 'doc', 'marx', 'examples', 'user-source')
for f in ['point.c', 'user.h']:
shutil.copy(os.path.join(src, f),
os.path.join(self.basepath, f))
jdmath_h = os.path.join(marxpath, 'include')
jdmath_a = os.path.join(marxpath, 'lib', 'libjdmath.a')
subprocess.call(['gcc', '-I' + jdmath_h, jdmath_a,
'-shared', 'point.c', '-o', 'point.so'])
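        # The call above is roughly this shell command (sketch; the actual
        # paths depend on the local |marx| installation):
        #   gcc -I<marx>/include <marx>/lib/libjdmath.a -shared point.c -o point.so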
@base.Marx
def step_2(self):
'''run USER source'''
return {'SourceType': 'USER',
'UserSourceFile': os.path.join(self.basepath, 'point.so')}
@base.Marx2fits
def step_3(self):
'turn into fits file'
return '--pixadj=EDSER', 'point', 'point.fits'
@base.Ciao
def step_30(self):
'''ds9 images of the PSF'''
return ['''ds9 -width 800 -height 500 -log -cmap heat point.fits -pan to 4018 4141 physical -zoom 8 -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
| Chandra-MARX/marx-test | tests/source.py | Python | gpl-2.0 | 10,336 | ["Gaussian"] | ea8c13b97a32dd76207cd0c2060cdd6e41499bffa6bc12d93a37605f0ec97c1b |
#!/usr/bin/env python
# Tsung-Yi Lin <tl483@cornell.edu>
# Ramakrishna Vedantam <vrama91@vt.edu>
import copy
import math
from collections import defaultdict
import numpy as np
def precook(s, n=4, out=False):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
    :return: term frequency vector for occurring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in xrange(1, n + 1):
for i in xrange(len(words) - k + 1):
ngram = tuple(words[i:i + k])
counts[ngram] += 1
return counts
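# Illustrative sketch (not part of the original module): with n=2,
# precook("the cat sat") returns a defaultdict equivalent to
#   {('the',): 1, ('cat',): 1, ('sat',): 1, ('the', 'cat'): 1, ('cat', 'sat'): 1}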
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(ref, n) for ref in refs]
def cook_test(test, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict)
'''
return precook(test, n, True)
class CiderScorer(object):
"""CIDEr scorer.
"""
def copy(self):
''' copy the refs.'''
new = CiderScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
return new
def __init__(self, test=None, refs=None, n=4, sigma=6.0):
''' singular instance '''
self.n = n
self.sigma = sigma
self.crefs = []
self.ctest = []
self.document_frequency = defaultdict(float)
self.cook_append(test, refs)
self.ref_len = None
def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
self.ctest.append(cook_test(test)) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
def size(self):
assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new CiderScorer instances
self.cook_append(other[0], other[1])
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self
def compute_doc_freq(self):
'''
        Compute document frequency over the reference data.
        This will be used to compute idf (inverse document frequency) later.
        The document frequency is stored on the object.
:return: None
'''
for refs in self.crefs:
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram, count) in ref.iteritems()]):
self.document_frequency[ngram] += 1
# maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
def compute_cider(self):
def counts2vec(cnts):
"""
Function maps counts of ngram to vector of tfidf weights.
            The function returns vec, an array of dictionaries that stores the mapping of n-grams to tf-idf weights.
            The n-th entry of the array corresponds to n-grams of length n+1.
:param cnts:
:return: vec (array of dict), norm (array of float), length (int)
"""
vec = [defaultdict(float) for _ in range(self.n)]
length = 0
norm = [0.0 for _ in range(self.n)]
for (ngram, term_freq) in cnts.iteritems():
# give word count 1 if it doesn't appear in reference corpus
df = np.log(max(1.0, self.document_frequency[ngram]))
# ngram index
n = len(ngram) - 1
# tf (term_freq) * idf (precomputed idf) for n-grams
vec[n][ngram] = float(term_freq) * (self.ref_len - df)
# compute norm for the vector. the norm will be used for computing similarity
norm[n] += pow(vec[n][ngram], 2)
if n == 1:
length += term_freq
norm = [np.sqrt(n) for n in norm]
return vec, norm, length
def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
'''
Compute the cosine similarity of two vectors.
:param vec_hyp: array of dictionary for vector corresponding to hypothesis
:param vec_ref: array of dictionary for vector corresponding to reference
:param norm_hyp: array of float for vector corresponding to hypothesis
:param norm_ref: array of float for vector corresponding to reference
:param length_hyp: int containing length of hypothesis
:param length_ref: int containing length of reference
:return: array of score for each n-grams cosine similarity
'''
delta = float(length_hyp - length_ref)
            # measure cosine similarity
val = np.array([0.0 for _ in range(self.n)])
for n in range(self.n):
# ngram
for (ngram, count) in vec_hyp[n].iteritems():
# vrama91 : added clipping
val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
val[n] /= (norm_hyp[n] * norm_ref[n])
assert (not math.isnan(val[n]))
# vrama91: added a length based gaussian penalty
val[n] *= np.e ** (-(delta ** 2) / (2 * self.sigma ** 2))
return val
# compute log reference length
self.ref_len = np.log(float(len(self.crefs)))
scores = []
for test, refs in zip(self.ctest, self.crefs):
# compute vector for test captions
vec, norm, length = counts2vec(test)
# compute vector for ref captions
score = np.array([0.0 for _ in range(self.n)])
for ref in refs:
vec_ref, norm_ref, length_ref = counts2vec(ref)
score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
# change by vrama91 - mean of ngram scores, instead of sum
score_avg = np.mean(score)
# divide by number of references
score_avg /= len(refs)
# multiply score by 10
score_avg *= 10.0
# append score of an image to the score list
scores.append(score_avg)
return scores
def compute_score(self, option=None, verbose=0):
# compute idf
self.compute_doc_freq()
# assert to check document frequency
assert (len(self.ctest) >= max(self.document_frequency.values()))
# compute cider score
score = self.compute_cider()
# debug
# print score
return np.mean(np.array(score)), np.array(score)
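# Minimal usage sketch (assumes Python 2, matching the xrange/iteritems idioms
# above; the sentences are illustrative):
#   scorer = CiderScorer(n=4, sigma=6.0)
#   scorer += ("a cat sits on the mat",
#              ["the cat sat on the mat", "a cat is on the mat"])
#   mean_score, per_image_scores = scorer.compute_score()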
| MeteorKepler/RICGA | ricga/eval_tools/pycocoevalcap/cider/cider_scorer.py | Python | apache-2.0 | 7,727 | ["Gaussian"] | 6837a736c27f1029acb2a5ea8f723165bfa0f945dfb6e7f156038b7d2cf3d317 |
|
# -*- coding: utf-8 -*-
"""
Util functions
"""
import os, re, sys, traceback
import logging
logging.basicConfig(format='%(asctime)s: %(message)s', datefmt = "%H:%M:%S")
from glob import glob
from codecs import open as decode_open
import nltk
import nltk.data
import lxml
from pyquery import PyQuery as pq
from zipfile import ZipFile
import json
from ground_truth import (ARTICLES, PREPOSITIONS, CONJUNCTIONS)
from label import get_label
def get_file_names(paths=["/group/home/puls/Shared/capitalization-recovery/12"]):
"""
Get all document file paths
"""
for path in paths:
for doc_dir in glob(os.path.join(path, "*")):
for file_path in glob(os.path.join(doc_dir, "*")):
if (os.path.isfile(file_path) and "." not in os.path.basename(file_path)):
yield file_path
title_pos_regexp = re.compile(r"^(\d+) (\d+) Headline type main$")
def get_title_position(path):
"""
>>> get_title_position("/group/home/puls/Shared/capitalization-recovery/30/online.wsj.com.xml.rss.3_7031/3918A8D35025B47AC6A62D293F5F506F.paf")
(42, 77)
"""
with decode_open(path, "r", "utf8") as paf:
for line in paf:
match = title_pos_regexp.search(line)
if match:
# get title index
start = int(match.group(1))
end = int(match.group(2))
return start, end
raise Exception("Unable to find start and end position for %s" %path)
def extract_title(path):
"""
Given document file path
Extract the title
>>> extract_title("/group/home/puls/Shared/capitalization-recovery/12/www.ameinfo.com.rssfeeds.10660/DE01D30EA383DFD9FA1427CB9CC935F2")
u'Polaroid launches new range of products at opening day of GITEX Technology Week 2014'
>>> extract_title("/group/home/puls/Shared/capitalization-recovery/30/online.wsj.com.xml.rss.3_7031/3918A8D35025B47AC6A62D293F5F506F")
u'Bad Bets Rock Fortress\u2019s Macro Fund'
>>> extract_title("/group/home/puls/Shared/capitalization-recovery/30/feeds.foxbusiness.com.foxbusiness/E1D1899ED1CDEAB1574C1D279CBA2632")
u'Is Gold\u2019s Knockout Punch Coming?'
>>> extract_title("/group/home/puls/Shared/capitalization-recovery/30/www.streetinsider.com.freefeed.php/34D4137A7AEB5118C6E9EC451E66B529")
u'Solving IT Debuts on Staffing Industry Analysts\u2019 Top 100 Fastest-Growing U.S. Staffing and Talent Engagement Firms'
"""
start, end = get_title_position(path + ".paf")
with decode_open(path, "r", "utf8") as doc:
#extract the content
content = doc.read()
return "".join(content[start: end])
def get_document_content_paf(path):
"""
Content extractor for PAF file
Exclude the title and get the actual content of the document
>>> c = get_document_content_paf("/group/home/puls/Shared/capitalization-recovery/30/online.wsj.com.xml.rss.3_7031/3918A8D35025B47AC6A62D293F5F506F")
"""
_, end = get_title_position(path + ".paf")
with decode_open(path, "r", "utf8", "ignore") as doc:
content = doc.read()
return "".join(content[end:])
def get_title_and_content_by_paf(path):
"""
Return:
the content before the title,
the title
the body
"""
start, end = get_title_position(path + ".paf")
with decode_open(path, "r", "utf8", "ignore") as doc:
content = doc.read()
return ("".join(content[:start]),
"".join(content[start:end]),
"".join(content[end:]))
def get_document_content(path):
"""
Get the actual content of the document
>>> get_document_content("/home/group/puls/Shared/capitalization-recovery/reuters-text/sth.txt")
u'something'
"""
with decode_open(path, "r", "utf8", "ignore") as doc:
return doc.read()
def is_monocase(title_words):
"""
Determine if the title words are already capitalized
>>> is_monocase("Global Eagle Entertainment and SES Sign a Strategic Partnership to Deliver Global Ku-Band Satellite in-Flight Connectivity to Airlines".split())
True
>>> is_monocase("Agenda Released for the 17th annual Summit on Superbugs & Superdrugs".split())
False
>>> is_monocase("How Find Your Inner Martin Scorsese to Build Brand & Rule the World".split())
True
>>> is_monocase("Half of YouTube's Traffic is Now Coming From Mobile: CEO".split()) # `is`
True
>>> is_monocase("Crystal Bridges Announces 2015 Exhibits, Including Warhol, van Gogh, Pollock".split()) # `van`
True
"""
functional_words = ARTICLES | PREPOSITIONS | CONJUNCTIONS
words = [word
for word in title_words[1:]
if word not in functional_words]
labels = map(get_label, words)
ic = filter(lambda l: l == 'IC', labels)
al = filter(lambda l: l == 'AL', labels)
if (len(al) == 0 or
        # some heuristic to check if it's really monocase
(len(ic) and
float(len(al)) / len(ic) <= 0.2
and len(al) <= 2 )):
return True
else:
return False
# Mapping for non-standard punctuations to standard ones
trans_mapping = {u'‘': u'\'',# (8216 ‘ #\LEFT_SINGLE_QUOTATION_MARK)
u'’': u'\'',# (8217 ’ #\RIGHT_SINGLE_QUOTATION_MARK)
u'❛': u'\'',# (10075 #\HEAVY_SINGLE_TURNED_COMMA_QUOTATION_MARK_ORNAMENT)
u'❜': u'\'',# (10076 #\HEAVY_SINGLE_COMMA_QUOTATION_MARK_ORNAMENT)
u'\u0092': u'\'', # (146 #\Private-Use-Two)
u'‛': u'\'', # (8219 ‛ #\SINGLE_HIGH-REVERSED-9_QUOTATION_MARK)
u'“': u'"', # (8220 “ #\LEFT_DOUBLE_QUOTATION_MARK)
u'”': u'"', # (8221 ” #\RIGHT_DOUBLE_QUOTATION_MARK)
u'‟': u'"', # (8223 ‟ #\DOUBLE_HIGH-REVERSED-9_QUOTATION_MARK)
u'❝': u'"', # (10077 ❝ #\HEAVY_DOUBLE_TURNED_COMMA_QUOTATION_MARK_ORNAMENT)
u'❞': u'"', # (10078 ❞ #\HEAVY_DOUBLE_COMMA_QUOTATION_MARK_ORNAMENT)
u'"': u'"', # (65282 " #\FULLWIDTH_QUOTATION_MARK)
u''': u'\'', # (65287 ' #\FULLWIDTH_APOSTROPHE)
u',': u',', # (65292 #\FULLWIDTH_COMMA)
u'_': u' ', # (95 UNDERSCORE)
u'–': u'-', # (8211 #\EN_DASH)
u'—': u'-', # (8212 #\EM_DASH)
}
trans_table = {ord(f): t
for f,t in trans_mapping.items()}
def normalize_title(s):
"""
>>> normalize_title(u'Bad Bets Rock Fortress\u2019s Macro Fund')
u"Bad Bets Rock Fortress's Macro Fund"
>>> normalize_title(u'Is Gold\u2019s Knockout Punch Coming?')
u"Is Gold's Knockout Punch Coming?"
>>> normalize_title(u'Judge finds flaw in Sacramento\u0092s arena review, but construction will continue')
u"Judge finds flaw in Sacramento's arena review, but construction will continue"
"""
try:
return s.translate(trans_table)
except:
msg = "Error processing : '%s'" %(s)
sys.stderr.write(msg)
traceback.print_exc(file=sys.stderr)
def get_reuter_file_paths(dirs = []):
"""
Get the zip file paths under the directories `dirs`
>>> paths = get_reuter_file_paths(["/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_1/", "/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_2/"])
>>> paths.next()
'/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_1/19960824.zip'
"""
for d in dirs:
for path in glob("%s/*.zip" %d):
yield path
def zip_contents(zip_path):
"""
Get the file contents in the zip file
Return a generator of the file content
>>> g = zip_contents("/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_1/19970101.zip")
>>> n, c = g.next() # get the content of the next file
>>> n
'282799newsML.xml'
"""
f=ZipFile(zip_path)
for name in f.namelist():
yield (name, f.read(name))
def save_content(content, original_file_path, target_directory = "/group/home/puls/Shared/capitalization-recovery/reuters-text/"):
"""
Save the content somewhere, return the saved path
>>> save_content("something", "/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_1/sth.xml", target_directory = "/group/home/puls/Shared/capitalization-recovery/reuters-text/")
'/group/home/puls/Shared/capitalization-recovery/reuters-text/sth.txt'
>>> open("/group/home/puls/Shared/capitalization-recovery/reuters-text/sth.txt").read()
'something'
"""
original_file_name = os.path.basename(original_file_path)
file_name = original_file_name.split(".")[0] + ".txt"
content_path = os.path.join(target_directory, file_name)
with decode_open(content_path, "w", "utf8") as f:
f.write(content)
return content_path
def load_reuter_article(content):
"""
Given the Reuter xml file content, return:
- title
- document content
>>> n, c = zip_contents('/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_1/19960824.zip').next()
>>> title, content = load_reuter_article(c)
>>> title
u'Harris says hikes dividend 12 pct. [CORRECTED 17:30 GMT, 26/08].'
>>> content[:9]
u'Electroni'
"""
try:
doc = pq(content)
except lxml.etree.ParserError:
raise ValueError("The xml content is invalid")
return unicode(doc.find("headline").text()), unicode(doc.find("text").text())
def prepare_reuter_data(reuter_data_dirs, content_dir):
"""
Prepare Reuter data in batch.
Extract the title and content in XML files and save the content somewhere,
meanwhile print the json data arrays
"""
zippaths = list(get_reuter_file_paths(reuter_data_dirs))[145:]
for i, zippath in enumerate(zippaths):
logging.error("%d / %d finished" %(i, len(zippaths)))
for path_name, xml_content in zip_contents(zippath):
try:
t, c = load_reuter_article(xml_content)
except ValueError:
logging.error("%s in %s has error" %(path_name, zippath))
pass
content_path = save_content(c, path_name, content_dir)
print json.dumps([content_path, unicode(t).encode("utf8")])
def clean_title_file(path):
with decode_open(path, "r", "utf8") as f:
for i,l in enumerate(f):
if i % 10000 == 0:
logging.error("%.4f completed", float(i) / 806792)
obj = json.loads(l)
words = nltk.word_tokenize(obj[1])
if is_monocase(words):
print l,
def load_crfsuite_format_data(file_obj):
"""
Load crfsuite format data and return data instances(each instance is a sentence)
Param:
------
file_obj: file-like object
Return:
------
list of list of dict: the feature values
list of list of str: the labels
>>> X, Y = load_crfsuite_format_data(open("test_data/labeled.crfsuite.txt", "r"))
>>> len(X), len(Y)
(3, 3)
>>> map(len, X)
[12, 8, 13]
>>> X[0][0]["word[0]|word[1]"]
"\'Messed|up"
>>> X[0][0]['__BOS__']
1
>>> X[0][0]['has-punct[0]']
1
>>> X[0][1]['has-punct[0]']
0
>>> map(len, Y)
[12, 8, 13]
>>> Y[0][0]
'MX'
"""
X = []
Y = []
words = []
labels = []
for l in file_obj:
word_features = {}
if len(l.strip()) == 0:
# we start a new sentence
X.append(words)
Y.append(labels)
words = []
labels = []
else:
segments = l.split('\t')
labels.append(segments[0])
for seg in segments[1:]:
if '=' in seg:
                    # '=' is guaranteed present here, so the split cannot fail
                    feat_key, feat_val = seg.split("=", 1)
                    feat_val = feat_val.strip()
if feat_val in ('True', 'False'):
word_features[feat_key] = (1
if feat_val == 'True'
else 0)
else:
word_features[feat_key] = feat_val
else:
word_features[seg.strip()] = 1
words.append(word_features)
    # flush the last sentence in case the file does not end with a blank line
    if words:
        X.append(words)
        Y.append(labels)
    return X, Y
if __name__ == "__main__":
# path = get_file_names()[0]
import doctest
doctest.testmod()
# prepare_reuter_data(["/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_1/", "/group/home/puls/Shared/capitalization-recovery/RCV1/REUTERS_CORPUS_2/"],
# "/group/home/puls/Shared/capitalization-recovery/reuters-text/")
# clean_title_file("./reuters.txt")
| xiaohan2012/capitalization-restoration-train | util.py | Python | mit | 12,853 | ["CRYSTAL"] | 1e97d92d56e9ac170e763aa31d6a409b9ea545ae46760f189eaee776d01794f8 |
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 05 16:19:42 2013
package contains standard x-ray related functions. Needs to be linked to database!!!
@author: lwiegart
version 0.1: included help function in all macros [12/01/2014]
version 0.1.1: added get_gap & get_Es
version 0.1.2: added get_EBragg
version 0.1.3: fixed error messages in get_gap and get_Es
version 0.1.4: added lattice spacings for Si111 and Si220 @80K (from XOP) (02/26/2015)
version 0.2.0: (this file) fixed some problems with arrays as inputs, e.g. in get_gap
and changed the help files to use the standard python 'help' and docstring functions;
added xfuncs_Exception class for handling xfuncs specific exceptions within python (03/23/2015)
added get_pinflux: calculates photon flux from photo current from pin diode
"""
import pylab as pl
import numpy as np
from os import listdir
from os.path import isfile, join
import re
# path to X-ray data files
#datapath='/nfs/xf11id/shared/xfuncs/X-ray_database/'
datapath='/home/xf11id/Repos/chxtools/chxtools/X-ray_database/'
def get_Lambda(E,u='SI'):
"""
calculates X-ray wavelength as a function of Energy [keV] in optional units.
Syntax: getLambda(E,u),
where E=X-ray energy; optional: u= 'A','nm','um','cm','mm','m','SI' (='m'), default in the absence of u: 'SI'
"""
hPlank=6.62606876e-34;
cvac=2.99792458e8;
Qelectron=1.602176463e-19;
scale=1
#l=hPlank*cvac/(E*1000*Qelectron)
l=hPlank*cvac/(E*1000*Qelectron);
    if u == 'A':
        scale=1e10;return l*scale # Angstroem
    elif u == 'nm':
        scale=1e9; return l*scale # nm
    elif u == 'um':
        scale=1e6; return l*scale # um
    elif u == 'mm':
        scale=1e3; return l*scale # mm
    elif u == 'cm':
        scale=1e2; return l*scale # cm
    elif u == 'm' or u == 'SI':
        scale=1; return l*scale
else:
print ('invalid option, type "get_Lambda(\'?\')" for available options and syntax')
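# Example (sketch): get_Lambda(8, 'A') -> ~1.5498, since lambda[A] = 12.398/E[keV].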
def get_saxsQ(theta,E=8):
"""
written by LW 13/03/2008.
Calculates Q value for SAXS experiments [1/A],
input parameters: scattering angle theta [deg] (not theta/2!!!),
X-ray energy in keV (default: 8keV) function is vector compatible
calling sequence: get_saxsQ(theta,E)
"""
lam=get_Lambda(E,'A')
    return 4*np.pi/lam*np.sin(np.deg2rad(theta/2))
def get_ac(material,E=8):
"""
by LW 10/03/2010
function calculates the critical angle for total external reflection as a function of
the material and the X-ray energy according to ac=sqrt(2*delta)
index of refraction is a .dat file from http://henke.lbl.gov/optical_constants/getdb2.html
(energy range: 2-30keV,delete the header % lines, name the file n_material.dat) %
calling sequence: ac=get_ac(material,E) where ac: critial angle in degrees, E [keV] (default: 8keV)
    type get_ac('material?') to show list of supported materials
"""
#get list_of supported materials from data file directory:
xdatafiles = [ f for f in listdir(datapath) if isfile(join(datapath,f)) ]
name=[]
for i in range(0, np.size(xdatafiles)):
m=re.search('(?<=n_)\w+', xdatafiles[i])
if m is not None:
name.append(m.group(0))
E=np.array(E)
if material in name:
loadn=datapath+'n_'+material+'.dat'
n=pl.loadtxt(loadn,comments='%')
if np.min(E)>=np.min(n[:,0]/1000) and np.max(E)<=np.max(n[:,0]/1000):
d=np.interp(E*1000,n[:,0],n[:,1])
return np.degrees(np.sqrt(2*d))
else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(n[:,0]/1000)+'=<E<='+"%3.4f" % np.max(n[:,0]/1000)+'keV)')
elif material=='material?':
print ('list of supported materials (based on data files in directory '+datapath+':')
print (name )
else: print ('error: non recognized material, please create index of refraction file first. Type "get_ac?" for instructions; type get_ac("material?") for list of supported materials')
def get_n(material,E=8):
"""
"by LW 07/04/2011 function get the index of refraction from stored data file,
index of refraction is a .dat file from http://henke.lbl.gov/optical_constants/getdb2.html
(energy range: 2-30keV,delete the header lines, name the file n_material.dat)
calling sequence: n=get_n(material,E) where n is the complex refractive index detlta-i*beta, E: X-ray energy in keV"
"""
#get list_of supported materials from data file directory:
xdatafiles = [ f for f in listdir(datapath) if isfile(join(datapath,f)) ]
name=[]
for i in range(0, np.size(xdatafiles)):
m=re.search('(?<=n_)\w+', xdatafiles[i])
if m is not None:
name.append(m.group(0))
E=np.array(E)
if material in name:
loadn=datapath+'n_'+material+'.dat'
n=pl.loadtxt(loadn,comments='%')
if np.min(E)>=np.min(n[:,0]/1000) and np.max(E)<=np.max(n[:,0]/1000):
d=np.interp(E*1000,n[:,0],n[:,1])
b=np.interp(E*1000,n[:,0],n[:,2])
return d-1j*b
else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(n[:,0]/1000)+'=<E<='+"%3.4f" % np.max(n[:,0]/1000)+'keV)')
elif material=='material?':
print ('list of supported materials (based on data files in directory '+datapath+':')
print (name)
else: print ('error: non recognized material, please create index of refraction file first. Type "get_n?" for instructions; type get_n("material?") for list of supported materials')
def get_mu(material,E=8):
"""
by LW 07/04/2011
function gets the attenuation length from stored data file,
attenuation length is a .dat file from http://henke.lbl.gov/optical_constants/getdb2.html
(energy range: 2-30keV,delete the header lines or comment with '%', name the file n_material.dat)
    calling sequence: mu=get_mu(material,E) where mu [MICRONS!!!] is the 1/e attenuation length, E: X-ray energy in keV
"""
#get list_of supported materials from data file directory:
xdatafiles = [ f for f in listdir(datapath) if isfile(join(datapath,f)) ]
name=[]
for i in range(0, np.size(xdatafiles)):
mm=re.search('(?<=mu_)\w+', xdatafiles[i])
if mm is not None:
name.append(mm.group(0))
E=np.array(E)
if material in name:
loadn=datapath+'mu_'+material+'.dat'
m=pl.loadtxt(loadn,comments='%')
if np.min(E)>=np.min(m[:,0]/1000) and np.max(E)<=np.max(m[:,0]/1000):
mu=np.interp(E*1000,m[:,0],m[:,1])
return mu
        else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(m[:,0]/1000)+'=<E<='+"%3.4f" % np.max(m[:,0]/1000)+'keV)')
elif material=='material?':
print ('list of supported materials (based on data files in directory '+datapath+':')
print (name)
else: print ('error: non recognized material, please create index of refraction file first. Type get_mu("?") for instructions; type get_n("material?") for list of supported materials')
def get_T(material,E=8,l=1):
"""
by LW 10/03/2010,
function calculates the transmission as a function of the material and the X-ray energy according to e^(-mul),
where mu=4pi/lambda*beta
index of refraction is a .dat file from http://henke.lbl.gov/optical_constants/getdb2.html
(energy range: 2-30keV,delete the header lines, name the file n_material.dat)
calling sequence: T=get_T(material,E,l)
where T: transmission, material: E: X-ray energy in keV, l: thickness of the material [um],
either E or l can be vectors; type get_T(\"material?\") for a list of supported materials
"""
#get list_of supported materials from data file directory:
xdatafiles = [ f for f in listdir(datapath) if isfile(join(datapath,f)) ]
name=[]
for i in range(0, np.size(xdatafiles)):
m=re.search('(?<=n_)\w+', xdatafiles[i])
if m is not None:
name.append(m.group(0))
E=np.array(E)
l=np.array(l)
#if len(E)==1 or len(l)==1:
if E.size==1 or l.size==1:
if material in name:
loadn=datapath+'n_'+material+'.dat'
n=pl.loadtxt(loadn,comments='%')
if np.min(E)>=np.min(n[:,0]/1000) and np.max(E)<=np.max(n[:,0]/1000):
b=np.interp(E*1000,n[:,0],n[:,2])
mu=4*np.pi/get_Lambda(E,'um')*b;
return np.exp(-mu*l);
else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(n[:,0]/1000)+'=<E<='+"%3.4f" % np.max(n[:,0]/1000)+'keV)')
elif material=='material?':
print ('list of supported materials (based on data files in directory '+datapath+':')
print (name)
else: print ('error: non recognized material, please create index of refraction file first. Type "get_T?" for instructions; type get_T("material?") for list of supported materials')
else: print ('error: either energy or length must be a scalar, cannot scan both energy and length at the same time.')
def get_Bragg(reflection,E=8.):
"""
by LW 17/03/2010
function return the Bragg angle [deg.] of a given material and reflection at a given Energy.
Calling sequence: thetaB=get_Bragg(reflection,E), thetaB(1)=Bragg angle[deg.] thetaB(2)=dhkl [A], thetaB(3)=I/Io [%].
E: x-ray energy in keV (can be an array of energies),
reflection: string, e.g. 'Si111'. Reflections implemented from http://database.iem.ac.ru/mincryst, T=25C or calculated from XOP, e.g for Si111&Si220 @80K
    type get_Bragg('reflections?') for a list of currently available materials;
"""
reflstr=['Si111cryo','Si220cryo','Si111', 'Si220', 'Si113', 'Si224', 'Si331', 'Si400','Ge111', 'Ge220', 'Ge113', 'Ge224', 'Ge331', 'Ge620', 'Ge531', 'Ge400', 'Ge115', 'Ge335','Ge440', 'Ge444', 'Ge333', 'C111', 'C220']
dspace=np.array([3.13379852,1.91905183,3.13542,1.92004,1.63742,1.10854,1.24589,1.35767,3.26627,2.00018,1.70576,1.15480,1.29789,0.89451,0.95627,1.41434,1.08876,0.86274,1.00009,0.81657,1.08876,2.05929,1.26105])
Irel=np.array([100,67.80,40.50,23.80,16.60,10.90,100,73.80,44.10,23.10,17.00,15.90,15.70,11.50,9.80,8.50,8.20,7.30,3.30,100,39.00])
if isinstance(reflection, basestring): # and all(isinstance(E, (int, long, float, complex)) for item in [E,E]): # <- bug in python: check for E is numeric works in standalone function, but not in this package => don't check
E=np.array(E)
lam=get_Lambda(E,'A')
if reflection in reflstr:
ind=reflstr.index(reflection)
print (reflstr[ind] +': d_{hkl}=' + "%3.4f" %dspace[ind] +'A I/I_o='+ "%3.4f" %Irel[ind])
theta=np.degrees(np.arcsin(lam/2/dspace[ind]))
ds=[];I=[]
for l in range(0,np.size(theta)):
ds.append(dspace[ind])
I.append(Irel[ind])
res=np.array([theta,np.array(ds),np.array(I)])
return res.T
elif reflection=='reflections?':
print ('List of available reflections (T=25C):')
print (reflstr )
else: print ('error: reflection not found. Type get_Bragg("reflections?") for list of available reflections.')
else: print ('error: reflection has to be a string and E needs to be numeric. Type get_Bragg? for help')
def get_EBragg(reflection,theta_Bragg=12.0):
"""
by LW 17/03/2010
function returns the X-ray energy [keV] of a given crystal and Bragg reflection.
Calling sequence: EBragg=get_EBragg(reflection,theta_Bragg (default: 12deg)), thetaB(1)=Bragg angle[deg.] thetaB(2)=dhkl [A], thetaB(3)=I/Io [%].
E: x-ray energy in keV (can be an array of energies),
reflection: string, e.g. 'Si111'. Reflections implemented from http://database.iem.ac.ru/mincryst, T=25C or calculated from XOP, e.g. for Si111 and Si220 @80K
    type get_Bragg('reflections?') for a list of currently available materials
"""
reflstr=['Si111cryo','Si220cryo','Si111', 'Si220', 'Si113', 'Si224', 'Si331', 'Si400','Ge111', 'Ge220', 'Ge113', 'Ge224', 'Ge331', 'Ge620', 'Ge531', 'Ge400', 'Ge115', 'Ge335','Ge440', 'Ge444', 'Ge333', 'C111', 'C220']
dspace=np.array([3.13379852,1.91905183,3.13542,1.92004,1.63742,1.10854,1.24589,1.35767,3.26627,2.00018,1.70576,1.15480,1.29789,0.89451,0.95627,1.41434,1.08876,0.86274,1.00009,0.81657,1.08876,2.05929,1.26105])
Irel=np.array([100,67.80,40.50,23.80,16.60,10.90,100,73.80,44.10,23.10,17.00,15.90,15.70,11.50,9.80,8.50,8.20,7.30,3.30,100,39.00])
# constants needed:
hPlank=6.62606876e-34
cvac=2.99792458e8
Qelectron=1.602176463e-19
if isinstance(reflection, basestring): # and all(isinstance(E, (int, long, float, complex)) for item in [E,E]): # <- bug in python: check for E is numeric works in standalone function, but not in this package => don't check
theta_Bragg=np.array(theta_Bragg)
if reflection in reflstr:
ind=reflstr.index(reflection)
#print reflstr[ind] +': d_{hkl}=' + "%3.4f" %dspace[ind] +'A I/I_o='+ "%3.4f" %Irel[ind]
ds=[];I=[]
for l in range(0,np.size(theta_Bragg)):
ds.append(dspace[ind])
I.append(Irel[ind])
dspace=np.array(ds)
lam=2*dspace*np.sin(theta_Bragg/180*np.pi)
EthetaB=hPlank*cvac/(lam*Qelectron)*1e7;
return EthetaB.T
elif reflection=='reflections?':
print ('List of available reflections (T=25C):')
print (reflstr )
else: print ('error: reflection not found. Type get_EBragg("reflections?") for list of available reflections.')
else: print ('error: reflection has to be a string and E needs to be numeric. Type get_Bragg? for help')
def get_Qpixel(center=[0,0],cord=[100,100],dis=1000,ps=75,E=8):
"""
written by LW 13/03/2008.
calculates Q [1/A] value for SAXS experiments from detector position,
input parameters: position of the direct beam [x,y] in pixel numbers, pixel coordinate to calculte Q [x y],
sample detector distance in mm, pixel size [um], X-ray energy in keV.
function is vector compatible for parameters dis OR ps OR E.
calling sequence: get_Qpixel([xc,yc],[x,y],dis,ps,E)
"""
center=np.array(center);cord=np.array(cord);dis=np.array(dis);ps=np.array(ps);E=np.array(E)
lam=get_Lambda(E,'A')
xc=center[0]
yc=center[1]
x=cord[0]
y=cord[1]
ps=ps/1000 #convert pixel size to mm
#real space distance on the CCD [mm]:
u=np.sqrt(((x-xc)**2+(y-yc)**2))*ps
#wavevector transfer [1/A]
return 4*np.pi/lam*np.sin(np.arctan(u/dis/2))
def get_2theta(Qxy, E=8):
"""
by LW 16/06/2005
Returns the 2theta angle [deg] as a function of the input prameters Qxy [1/A] and Energy.
Type get_2theta(Qxy [1/A], E [keV] (default: 8keV) ). This function is vector compatible.
"""
Qxy=np.array(Qxy);E=np.array(E)
lam=get_Lambda(E,'A')
return np.degrees(2*np.arcsin(Qxy/(4*np.pi)*lam))
def get_Gam(Qz,alpha_i=.12,E=8):
hlpstr="by sof 01/12/2003 Returns the angle Gamma (out-of-plane angle in GIXD) defined in terms of Qz, the incident angle and the energy of the X-Ray beam. Type getGam(Qz [1/A], Incident Angle [deg] (default: 0.12deg) , E[keV] (default:8keV)), This function is vector compatible. Type get_Gam(\'?\') for help"
if Qz=='?':
print (hlpstr)
else:
Qz=np.array(Qz);alpha_i=np.array(alpha_i);E=np.array(E)
lam=get_Lambda(E,'A')
return np.degrees(lam*Qz/(2*np.pi)-np.sin(alpha_i))
def get_Qll(Ty,alpha_i=.12,d=1000,E=8):
hlpstr="LW 26-01-2005 Function returns the parallel wavevector transfer in GI-XPCS geometry [cm^-1]. Type: get_Qll(Ty[mm],alpha_i[deg] (default: 0.12deg), d [mm] (default: 1000mm),E[keV] (default:8keV))=>qll[cm^-1]; type get_Qll(\'?\') for help"
if Ty=='?':
print ( hlpstr)
else:
Ty=np.array(Ty);alpha_i=np.array(alpha_i);E=np.array(E)
lam=get_Lambda(E,'A')
af=np.arctan((d*np.tan(np.deg2rad(alpha_i))+Ty)/d)
return (2*np.pi/lam)*(np.cos(np.deg2rad(alpha_i))-np.cos(af))*1e8
def get_Qxy(theta,E=8):
hlpstr="sof 26/11/2003 To return for in plane X-ray scattering the parallel wavevector transfer in 1/A. Type get_Qxy(FULL scattering angle [deg], E [keV] (default:8keV), the function returns the Q vector [1/A] in the surface plane. Angle measured in surface from specular direction! This function is vector compatible. Type get_QXY(\'?\') for help."
if theta=='?':
print (hlpstr)
else:
theta=np.array(theta);E=np.array(E)
lam=get_Lambda(E,'A')
return 4*np.pi*np.sin(np.deg2rad(theta)/2)/lam
def get_Qz(Gam,alpha_i=.12,E=8):
hlpstr="function by sof 01/12/2003 Returns the Qz z-component of wavevector transfer defined in terms of the incident and measured angles and the energy of the X-Ray beam. Type get_Qz(Gam [deg], alpha_i [deg] (default: 0.12deg), E[keV] (default: 8keV)). This function is vector compatible. Type get_Qz(\'?\') for help."
if Gam=='?':
print (hlpstr)
else:
E=np.array(E);Gam=np.deg2rad(np.array(Gam));alpha_i=np.deg2rad(np.array(alpha_i))
lam=get_Lambda(E,'A')
return 2*np.pi*(np.sin(Gam)+np.sin(alpha_i))/lam
def ch2deg(chn,channel_per_degree=100,chOff=0):
"""
LW 08-07-2005
function converts the PSD channel (chn) into angles [deg],
needed are the calibration parameters channels/deg (channel_per_degree)
and the channel number of the direct beam (chOff).
Calling sequence: ch2deg(chan,channel_per_degree (default: 100),chOff (default=0) -> corresponding angle [deg].
Function is vector compatible.
"""
chn=np.array(chn);channel_per_degree=np.array(channel_per_degree);chOff=np.array(chOff)
return (chn-chOff)/channel_per_degree
def get_Vineyard(Qz,E=8,ra0=1):
"""
by LW 20/07/2006
function calculates the Vineyard (transmission) Function for a given (range) of Qz(s)
and given X-ray energy and scattering length density ra0 of the subphase media.
    Type: get_Vineyard(Qz [A], E [keV] (default: 8keV), ra0 [SI] (default: 1)).
"""
lam=get_Lambda(E,'A')
Qc=4*np.pi*np.sin(np.sqrt(np.pi*ra0)*lam/np.pi)/lam
rx=Qz/Qc
V=np.ones(np.size(Qz))
for i in range(0,np.size(Qz)):
if rx[i]<=1:
V[i]=2*rx[i]
else: V[i]=2*rx[i]/(rx[i]+np.sqrt(rx[i]**2-1))
return V
def get_gap(E,harmonic=3,ID='CHX_IVU20_12202014'):
"""
by LW 12/03/2014, function calculates the undulator gap for a requested energy
and harmonic based on magnetic measurement data in the database
    type get_gap(E [keV], harmonic [integer] (default=3), id (default='CHX_IVU20_12202014'));
    E can be an array of energies. Type get_gap("ID?") for a list of available magnetic datasets.
"""
#get list_of available magnetic measurements from data file directory:
xdatafiles = [ f for f in listdir(datapath) if isfile(join(datapath,f)) ]
name=[]
for i in range(0, np.size(xdatafiles)):
m=re.search('(?<=id_)\w+', xdatafiles[i])
if m is not None:
name.append(m.group(0))
if E=='ID?':
print ('list of available magnetic measurements (based on data files in directory '+datapath+':')
print (name)
else:
E=np.array(E)*1.0
harm_check(harmonic)
if ID in name:
loadn=datapath+'id_'+ID+'.dat'
magdat=pl.loadtxt(loadn,comments='%')
#harmonic=harmonic*1.0
if np.min(E/harmonic)>=np.min(magdat[:,2]) and np.max(E/harmonic)<=np.max(magdat[:,2]):
gap=np.interp(E/harmonic,magdat[:,2],magdat[:,0])
return gap
# this else should be a warning only and should return NaN
else: raise xfuncs_Exception ('error: energy '+"%3.4f" %E +'[keV] out of range for requested harmonic number, gap limit: ('+"%3.4f" % np.min(magdat[:,0])+'=<gap<='+"%3.4f" % np.max(magdat[:,0])+'mm), try using higher/lower harmonic number.')
        else: raise xfuncs_Exception('error: unrecognized magnetic data. Type get_gap(\'ID?\') for a list of available magnetic datasets.')
def get_Es(gap,harmonic=[1,2,3,4,5],ID='CHX_IVU20_12202014'):
"""
by LW 12/03/2014,
function calculates the X-ray energies for a given undulator gap and set of harmonics
based on magnetic measurement data in the database
type get_Es(gap [mm], harmonic [integer] (default=[1,2,3,4,5]), id (default:'CHX_IVU20_12202014'))
harmonic can be a list of integers. Type get_Es(\"ID?\") for a list of available magnetic datasets
"""
#get list_of available magnetic measurements from data file directory:
xdatafiles = [ f for f in listdir(datapath) if isfile(join(datapath,f)) ]
name=[]
for i in range(0, np.size(xdatafiles)):
m=re.search('(?<=id_)\w+', xdatafiles[i])
if m is not None:
name.append(m.group(0))
if gap=='ID?':
print ('list of available magnetic measurements (based on data files in directory '+datapath+':')
print (name)
else:
        harm_check(harmonic)
        gap=np.array(gap)
if ID in name:
loadn=datapath+'id_'+ID+'.dat'
magdat=pl.loadtxt(loadn,comments='%')
harmonic=np.array(harmonic)*1.0
if np.min(gap)>=np.min(magdat[:,0]) and np.max(gap)<=np.max(magdat[:,0]):
Es=np.interp(gap,magdat[:,0],magdat[:,2])
Eharmonics=np.array([harmonic,harmonic*Es])
return Eharmonics.T
else: print ('error: gap '+"%3.4f" % gap +'[mm] out of range for gap ('+"%3.4f" % np.min(magdat[:,0])+'=<gap<='+"%3.4f" % np.max(magdat[:,0])+'mm), try higher/lower harmonic number.')
        else: print ('error: unrecognized magnetic data. Type get_Es(\'ID?\') for a list of available magnetic datasets.')
def get_pinflux(current,Energy,thickness=300):
"""
by LW 03/26/2015
function to calculate photon flux from pin-diode measurement
uses scattering cross section for Si from NIST -> PhElAbsCross_si.dat in database
assumes Silicon as material as other materials would be pretty exotic
calling sequence: get_pinflux(current,Energy,thickness=300)
current: current [A]
Energy: X-ray energy [keV]
thickness: Si diode thickness [um]
"""
    # some conversions and constants:
rho_Si=2.329 # density of Si in g/cm^3!!!
epsilon=3.66 # energy for creation of electron-hole pair in Si [eV]
thickness=thickness/1.0E4 #conversion to cm
# read datafile with scattering cross section:
loadn=datapath+'PhElAbsCross_Si.dat'
crossdat=pl.loadtxt(loadn,comments='%')
# check for energy range:
xmin=min(crossdat[:,0])*1E3
xmax=max(crossdat[:,0])*1E3
Energy=np.array(Energy)*1.0
current=np.array(current)*1.0
if np.max(Energy) > xmax or np.min(Energy) <xmin:
raise xfuncs_Exception(['X-ray energy out of range for cross section data. ',xmin,'<=Energy<=,',xmax,' [keV]'])
# calculate photo current PER 1E10 ph/s:
PhCur=1E10*crossdat[:,0]*1e3*1.6022e-16*(1-np.exp(-crossdat[:,1]*thickness*rho_Si))/epsilon
PhCurint=np.interp(Energy,crossdat[:,0],PhCur)
photon_flux=current/PhCurint*1E10
print ('photo current for E= ',Energy,'keV: ',PhCurint*1E3,'mA/10^10ph/s')
print ('flux for photo current ',current*1E3,'mA at E=',Energy,'keV: ',photon_flux,'ph/s')
return photon_flux
####### Help functions
class xfuncs_Exception(Exception):
    """
    by LW 03/19/2015
    class to raise xfuncs specific exceptions
    """
    pass
def harm_check(harm_n):
"""
by LW 03/19/2015
simple function to check whether single value or list of values only contains
integers >=1 (number of undulator harmonics)
calling sequence: harm_check(harm_n), where harm_n is a single value or list of values
    function raises an exception if harm_n contains values other than integers >=1
"""
state=True
if np.all(np.array(harm_n)>=1)!=True:
raise xfuncs_Exception("Type Error: Harmonic numbers need to be >=1!")
try:
        list(harm_n) # harm_n is the list to be checked
for i in list(harm_n):
#print isinstance(i,int)
state=state*isinstance(i,int)
    except: # check for harm_n being a single integer
try:
state=state*isinstance(harm_n, int)
#print isinstance(harm_n, int)
except: pass
if state !=True:
raise xfuncs_Exception("Type Error: Harmonic numbers need to be integers >=1!")
| sameera2004/chxtools | chxtools/xfuncs.py | Python | bsd-3-clause | 25,731 | ["CRYSTAL"] | 8b37ea98e8b9329e13f2d0eb6bc2c07dfe471f395002a31b84cfb7ce13df4cd4 |
|
import mango.utils
from .Gaussian import *
import copy
import re
import sys
import scipy as sp
import numpy as np
import scipy.stats
from mango import mpi
try:
from mango.fmm._PwGaussianMixelMixtureModel import csvParsePgmmm
except:
csvParsePgmmm = None
logger, rootLogger = mpi.getLoggers(__name__)
class MixtureModel:
def __init__(self):
self.distList = []
def size(self):
return len(self.distList)
def append(self, dist):
self.distList.append(dist)
def clear(self):
self.distList = []
def pdf(self, x):
return sum([d.wpdf(x) for d in self.distList])
def cdf(self, x):
return sum([d.wcdf(x) for d in self.distList])
def evalBinProbabilities(self, binEndPts):
c = self.cdf(binEndPts)
return c[1:]-c[0:c.size-1]
def sort(self, cmp):
self.distList.sort(cmp)
def getGroupings(self):
return None
def __getitem__(self, i):
return self.distList[i]
def __iter__(self):
return iter(self.distList)
def getMixtureWeightArray(self, dtype="float64"):
return sp.array([d.getWeight() for d in self.distList], dtype=dtype)
def normaliseWeights(self, dtype="float64"):
wf = sp.sum(self.getMixtureWeightArray(dtype))
for d in self.distList:
d.setWeight(d.getWeight()/wf)
class WeightedMixtureModel (MixtureModel, WeightedDistribution):
def __init__(self, weight=1.0):
MixtureModel.__init__(self)
WeightedDistribution.__init__(self, weight)
class GaussianMixtureModel1d (MixtureModel):
def __init__(self, means=None, stddevs=None, mixWeights=None):
MixtureModel.__init__(self)
        if ((means is not None) or (stddevs is not None) or (mixWeights is not None)):
for i in range(len(means)):
m = 0
s = 1
w = 1
try:
m = means[i]
except:
m = 0
try:
s = stddevs[i]
except:
s = 1
try:
w = mixWeights[i]
except:
w = 1
self.append(WeightedGaussian1d(mean=m, stddev=s, weight=w))
def sort(self, cmp=None):
if (cmp == None):
cmp = gaussian1dCompare
MixtureModel.sort(self, cmp)
def getMeanArray(self, dtype="float64"):
return sp.array([d.getMean() for d in self.distList], dtype=dtype)
def getVarianceArray(self, dtype="float64"):
return sp.array([d.getVariance() for d in self.distList], dtype=dtype)
def getStandardDeviationArray(self, dtype="float64"):
return sp.array([d.getStandardDeviation() for d in self.distList], dtype=dtype)
def getMixtureWeightArray(self, dtype="float64"):
return sp.array([d.getWeight() for d in self.distList], dtype=dtype)
def getParameterList(self):
return [self.getMeanArray(), self.getStandardDeviationArray(), self.getMixtureWeightArray()]
def __str__(self):
s = ""
if (self.size() > 0):
mn = self.getMeanArray()
sd = self.getStandardDeviationArray()
wt = self.getMixtureWeightArray()
s = "%9.2f" % mn[0]
for mean in mn[1:]:
s += ", %9.2f" % mean
for stddev in sd:
s += ", %9.2f" % stddev
for weight in wt:
s += ", %10.8f" % weight
return s
def setParameters(self, means, stddevs, mixWeights):
self.clear()
for i in range(len(means)):
self.append(WeightedGaussian1d(mean=means[i], stddev=stddevs[i], weight=mixWeights[i]))
def rvs(self, size=1):
numDist = self.size()
prms = sp.transpose(sp.array([[dist.getMean(), dist.getStandardDeviation(), dist.getWeight()] for dist in self.distList]))
idx = np.dot(np.random.multinomial(1, prms[2,:], size=size),np.arange(numDist))
        rvs = np.zeros((size,), dtype=prms.dtype)
for dIdx in range(numDist):
msk = sp.where(idx == dIdx)
numSamp = sp.sum(sp.where(idx == dIdx, 1, 0))
rvs[msk] = scipy.stats.norm.rvs(loc=prms[0, dIdx], scale=prms[1,dIdx], size=numSamp)
return rvs
def groupMixtureModel(ungroupedMixModel, groupMixIdxDict):
grpMixModel = MixtureModel()
grpIdxList = groupMixIdxDict.keys()
grpIdxList.sort()
for grpIdx in grpIdxList:
wMixDist = WeightedMixtureModel()
weight = 0.0
for clsIdx in groupMixIdxDict[grpIdx]:
weight += ungroupedMixModel[clsIdx].getWeight()
wMixDist.append(ungroupedMixModel[clsIdx])
wMixDist.normaliseWeights()
wMixDist.setWeight(weight)
grpMixModel.append(wMixDist)
return grpMixModel
def csvParseGaussianMixtureModels(csvString):
rootLogger.info("Parsing Gaussian Mixture Model...")
lineList = csvString.split("\n")
lineIdx = 0
line = lineList[lineIdx].strip()
headerLineRegEx = re.compile(".*mean.*,.*st.*dev.*,.*weight.*")
while ((lineIdx < len(lineList)) and (headerLineRegEx.match(line) == None)) :
lineIdx += 1
line = lineList[lineIdx].strip()
if (lineIdx >= len(lineList)):
raise RuntimeError("Could not find header line match for reg-ex %s " % headerLineRegEx)
mixList = [GaussianMixtureModel1d()]
prmLineRegEx = re.compile("\s*([^\s,]*)\s*,\s*([^\s,]*)\s*,\s*([^\s,]*)\s*.*")
while ((lineIdx+1) < len(lineList)):
lineIdx += 1
line = lineList[lineIdx].strip()
if (len(line) > 0):
mtch = prmLineRegEx.match(line)
if (mtch != None):
logger.debug("%s" % (mtch.groups(),))
mixList[-1].append(
WeightedGaussian1d(
mean = float(mtch.group(1)),
stddev = float(mtch.group(2)),
weight = float(mtch.group(3))
)
)
else:
mixList.append(GaussianMixtureModel1d())
if (mixList[-1].size() == 0):
mixList = mixList[0:-1]
return mixList
def csvParseMixtureModels(csvString):
lineList = csvString.split("\n")
lineIdx = 0
line = lineList[lineIdx].strip()
pgmmmHeaderLineRegEx = re.compile(".*pgmmm-mean.*,.*pgmmm-st.*dev.*,.*pgmmm-weight.*")
gmmHeaderLineRegEx = re.compile(".*mean.*,.*st.*dev.*,.*weight.*")
regExPairList = [(pgmmmHeaderLineRegEx, csvParsePgmmm), (gmmHeaderLineRegEx, csvParseGaussianMixtureModels)]
matchIdx = None
while ((matchIdx == None) and (lineIdx < len(lineList))):
for pairIdx in range(len(regExPairList)):
if ((matchIdx == None) and (regExPairList[pairIdx][0].match(line) != None)):
matchIdx = pairIdx
if (matchIdx == None):
lineIdx += 1
line = lineList[lineIdx].strip()
if (lineIdx >= len(lineList)):
raise RuntimeError("Could not find header line match for any reg-ex in %s " % str([p[0] for p in regExPairList]))
mm = None
if (regExPairList[matchIdx][1] != None):
mm = regExPairList[matchIdx][1](csvString)
else:
raise ValueError("Function not available to parse mixture model with header:\n" + line)
return mm
def calcGaussianMixtureModel1dTruncRange(mixModel, weightTruncPercent, numStdDev=2.4):
mixModel = copy.deepcopy(mixModel)
mixModel.sort(weightCompare)
percent = 0.0
minT = sys.float_info.max
    maxT = -sys.float_info.max  # most negative float; float_info.min is the smallest positive value
for i in range(0, mixModel.size()):
dist = mixModel[i]
percent += 100.0*dist.getWeight()
mn = dist.getMean() - numStdDev*dist.getStandardDeviation()
mx = dist.getMean() + numStdDev*dist.getStandardDeviation()
if (mn < minT):
minT = mn
if (mx > maxT):
maxT = mx
if (percent > weightTruncPercent):
break
return (minT, maxT)
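# Minimal usage sketch (assumes mango and its Gaussian module are importable):
#   mm = GaussianMixtureModel1d(means=[0.0, 5.0], stddevs=[1.0, 2.0],
#                               mixWeights=[0.3, 0.7])
#   mm.pdf(1.0)             # weighted sum of the two component densities
#   samples = mm.rvs(1000)  # multinomial pick of a component, then normal draws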
| pymango/pymango | misc/python/mango/fmm/MixtureModel.py | Python | bsd-2-clause | 8,154 | ["Gaussian"] | d3f5824dd9c87b311b853d869bff14075bcf3b16f5213c94c9ab9a57ae2a45ce |
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009-2010 Benny Malengier <benny.malengier@gramps-project.org>
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Tim Lyons
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Report output generator for html documents, based on Html and HtmlBackend
"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import os
import shutil
import time
from gramps.gen.ggettext import gettext as _
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.utils.image import resize_to_jpeg
from gramps.gen.const import DATA_DIR, WEBSTUFF_IMAGE_DIR, PROGRAM_NAME, URL_HOMEPAGE, VERSION
from gramps.gen.plug.docgen import BaseDoc, TextDoc, FONT_SANS_SERIF, URL_PATTERN
from gramps.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".htmldoc")
_TEXTDOCSCREEN = 'grampstextdoc.css'
_HTMLSCREEN = 'grampshtml.css'
#------------------------------------------------------------------------
#
# Set up to make links clickable
#
#------------------------------------------------------------------------
_CLICKABLE = r'''<a href="\1">\1</a>'''
#------------------------------------------------------------------------
#
# HtmlDoc
#
#------------------------------------------------------------------------
class HtmlDoc(BaseDoc, TextDoc):
"""Implementation of the BaseDoc and TextDoc gen.plug.docgen api for the
creation of Html files. This is achieved by writing on a HtmlBackend
object
div id's defined here:
id="grampstextdoc" : the entire text report
id="grampsheading" : a small defined heading, but not h1 to h6 !
id="grampsstylednote" : start of part with a styled note, divided in
paragraphs
id="grampsnote" : start of part with a note. This id is normally not
used
The styles as defined in the stylesheed of the textdoc, will be converted
to css class. Color is removed to avoid conflicts with the css. Also
Fontface is removed. Size, italic, bold, margins, borders are retained
"""
def __init__(self, styles, paper_style):
BaseDoc.__init__(self, styles, None)
self.style_declaration = ''
self.htmllist = []
self._backend = None
self.css_filename = ''
self.warn_dir = True
self._col = 0
self._tbl = None
self._empty = 1
self.title = ''
self.__title_written = -1 # -1 = not written, 0 = writing, 1 = written
self.__link_attrs = {} # additional link attrs, eg {"style": "...", "class": "..."}
self.use_table_headers = False # th, td
def set_css_filename(self, css_filename):
"""
Set the css file to use. The path must be included.
Note: DocReportDialog sets this for html doc
"""
if css_filename and os.path.basename(css_filename):
self.css_filename = css_filename
else:
self.css_filename = ''
def open(self, filename):
"""
Overwrite base method
"""
self._backend = HtmlBackend(filename)
self._backend.open()
self.htmllist += [self._backend.html_body]
#start a gramps report
self.htmllist += [Html('div', id="grampstextdoc")]
self.build_header()
def build_header(self):
"""
Build up the header of the html file over the defaults of Html()
"""
# add additional meta tags and stylesheet links to head section
# create additional meta tags
_meta1 = 'name="generator" content="%s %s %s"' % (PROGRAM_NAME,
VERSION, URL_HOMEPAGE)
meta = Html('meta', attr = _meta1)
#set styles of the report as inline css
self.build_style_declaration()
        # GRAMPS favicon and css
fname1 = '/'.join([self._backend.datadir(), 'favicon.ico'])
fname2 = '/'.join([self._backend.datadir(), _TEXTDOCSCREEN])
fname3 = '/'.join([self._backend.datadir(), _HTMLSCREEN])
# links for GRAMPS favicon and stylesheets
links = Html('link', rel='shortcut icon', href=fname1,
type='image/x-icon') + (
Html('link', rel='stylesheet', href=fname2, type='text/css',
media='screen', indent=False),)
if self.css_filename:
links += (Html('link', rel='stylesheet', href=fname3,
type='text/css', media='screen', indent=False),
)
self._backend.html_header += (meta, links)
def build_style_declaration(self, id="grampstextdoc"):
"""
Convert the styles of the report into inline css for the html doc
"""
styles = self.get_style_sheet()
text = []
for sname in styles.get_cell_style_names():
style = styles.get_cell_style(sname)
pad = "%.3fcm" % style.get_padding()
top = bottom = left = right = 'none'
if style.get_top_border():
top = 'thin solid #000000'
if style.get_bottom_border():
bottom = 'thin solid #000000'
if style.get_left_border():
left = 'thin solid #000000'
if style.get_right_border():
right = 'thin solid #000000'
text.append('#%s .%s {\n'
'\tpadding: %s %s %s %s;\n'
'\tborder-top:%s; border-bottom:%s;\n'
'\tborder-left:%s; border-right:%s;\n}'
% (id, sname, pad, pad, pad, pad, top, bottom,
left, right))
for style_name in styles.get_paragraph_style_names():
style = styles.get_paragraph_style(style_name)
font = style.get_font()
font_size = font.get_size()
#font_color = '#%02x%02x%02x' % font.get_color()
align = style.get_alignment_text()
text_indent = "%.2f" % style.get_first_indent()
right_margin = "%.2f" % style.get_right_margin()
left_margin = "%.2f" % style.get_left_margin()
top_margin = "%.2f" % style.get_top_margin()
bottom_margin = "%.2f" % style.get_bottom_margin()
top = bottom = left = right = 'none'
if style.get_top_border():
top = 'thin solid #000000'
if style.get_bottom_border():
bottom = 'thin solid #000000'
if style.get_left_border():
left = 'thin solid #000000'
if style.get_right_border():
right = 'thin solid #000000'
italic = bold = ''
if font.get_italic():
italic = 'font-style:italic; '
if font.get_bold():
bold = 'font-weight:bold; '
#if font.get_type_face() == FONT_SANS_SERIF:
# family = '"Helvetica","Arial","sans-serif"'
#else:
# family = '"Times New Roman","Times","serif"'
# do not allow color, set in base css !
# so no : 'color: %s' % font_color
# so no : 'font-family:%s;' % family
text.append('#%s .%s {\n'
'\tfont-size: %dpt;\n'
'\ttext-align: %s; text-indent: %scm;\n'
'\tmargin-right: %scm; margin-left: %scm;\n'
'\tmargin-top: %scm; margin-bottom: %scm;\n'
'\tborder-top:%s; border-bottom:%s;\n'
'\tborder-left:%s; border-right:%s;\n'
'\t%s%s\n}'
% (id, style_name, font_size,
align, text_indent,
right_margin, left_margin,
top_margin, bottom_margin,
top, bottom, left, right,
italic, bold))
self.style_declaration = '\n'.join(text)
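    # For illustration (hypothetical, not emitted by the code above verbatim):
    # a paragraph style named "Normal" with a thin top border would yield css
    # along the lines of:
    #
    #   #grampstextdoc .Normal {
    #       font-size: 12pt;
    #       text-align: left; text-indent: 0.00cm;
    #       margin-right: 0.00cm; margin-left: 0.00cm;
    #       margin-top: 0.25cm; margin-bottom: 0.25cm;
    #       border-top:thin solid #000000; border-bottom:none;
    #       border-left:none; border-right:none;
    #   }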
def close(self):
"""
Overwrite base method
"""
        while len(self.htmllist) > 1:
self.__reduce_list()
#now write the actual file
self._backend.close()
self.write_support_files()
def copy_file(self, from_fname, to_fname, to_dir=''):
"""
Copy a file from a source to a (report) destination.
        If the destination directory does not exist yet, it will be created.
Normally 'to_fname' will be just a filename, without directory path.
'to_dir' is the relative path name in the destination root. It will
be prepended before 'to_fname'.
"""
#build absolute path
dest = os.path.join(self._backend.datadirfull(), to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
shutil.copyfile(from_fname, dest)
elif self.warn_dir:
from gramps.gui.dialog import WarningDialog
WarningDialog(
_("Possible destination error") + "\n" +
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
def write_support_files(self):
"""
Copy support files to the datadir that needs to hold them
"""
#css of textdoc styles
        with open(os.path.join(self._backend.datadirfull(),
                               _TEXTDOCSCREEN), 'w') as tdfile:
            tdfile.write(self.style_declaration)
#css file
if self.css_filename:
#we do an extra check in case file does not exist, eg cli call
fullpath = os.path.join(DATA_DIR, self.css_filename)
if os.path.exists(fullpath):
self.copy_file(fullpath, _HTMLSCREEN)
#favicon
self.copy_file(os.path.join(WEBSTUFF_IMAGE_DIR, 'favicon.ico'),
'favicon.ico')
def __reduce_list(self):
"""
        Take the internal list of html objects and append the last element
        to the previous one. This closes the innermost open tag.
"""
self.htmllist[-2] += self.htmllist[-1]
self.htmllist.pop()
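    # For illustration (hypothetical state, not produced by this module
    # as-is): with htmllist == [<body>, <div>, <p>], one __reduce_list() call
    # appends the <p> to the <div> and pops it, leaving [<body>, <div>] --
    # i.e. the open <p> is now closed and nested inside the <div>.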
def __write_text(self, text, mark=None, markup=False, links=False):
"""
@param text: text to write.
@param mark: IndexMark to use for indexing (not supported)
@param markup: True if text already contains markup info.
Then text will no longer be escaped
@param links: make URLs clickable if True
"""
if not markup:
text = self._backend.ESCAPE_FUNC()(text)
        if self.__title_written == 0:
            self.title += text
        if links:
import re
text = re.sub(URL_PATTERN, _CLICKABLE, text)
self.htmllist[-1] += text
def __empty_char(self):
"""
        Output a non-breaking space so that browsers render empty content
        correctly.
"""
self.__write_text(' ', markup=True)
def write_text(self, text, mark=None, links=False):
"""
Overwrite base method
"""
if text != "":
self._empty = 0
self.__write_text(text, mark, links=links)
def write_title(self):
"""
Add title field to header
"""
self._backend.html_header += Html('title', self.title,
inline=True)
def start_table(self, name, style):
"""
Overwrite base method
"""
self.first_row = True
styles = self.get_style_sheet()
self._tbl = styles.get_table_style(style)
self.htmllist += [Html('table', width=str(self._tbl.get_width())+'%',
cellspacing='0')]
def end_table(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_row(self):
"""
Overwrite base method
"""
self.htmllist += [Html('tr')]
self._col = 0
def end_row(self):
"""
Overwrite base method
"""
self.first_row = False
self.__reduce_list()
def start_cell(self, style_name, span=1):
"""
Overwrite base method
"""
if self.use_table_headers and self.first_row:
tag = "th"
else:
tag = "td"
self._empty = 1
if span > 1:
self.htmllist += (Html(tag, colspan=str(span),
class_=style_name),)
self._col += span
else:
self.htmllist += (Html(tag, colspan=str(span),
width=str(self._tbl.get_column_width(
self._col))+ '%',
class_=style_name),)
self._col += 1
def end_cell(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_paragraph(self, style_name, leader=None):
"""
Overwrite base method
"""
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
level = style.get_header_level()
if level == 0:
#a normal paragraph
self.htmllist += (Html('p', class_=style_name, inline=True),)
elif level == 1:
if self.__title_written == -1 and \
style_name.upper().find('TITLE') != -1:
self.__title_written = 0
self.htmllist += (Html('div', id="header"),)
self.htmllist += (Html('h1', class_=style_name, id='SiteTitle',
inline=True),)
else:
self.htmllist += (Html('h1', class_=style_name, inline=True),)
        elif 2 <= level <= 5:
            tag = 'h' + str(level + 1)
self.htmllist += (Html(tag, class_=style_name, inline=True),)
else:
# a low level header
self.htmllist += (Html('div', id='grampsheading',
class_=style_name),)
if leader is not None:
self.write_text(leader+' ')
def end_paragraph(self):
"""
Overwrite base method
"""
if self._empty == 1:
self.__empty_char()
self._empty = 0
self.__reduce_list()
if self.__title_written == 0:
self.__title_written = 1
#close div statement
self.__reduce_list()
self.write_title()
def start_bold(self):
"""
Overwrite base method
"""
self.htmllist += [Html('strong')]
def end_bold(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_superscript(self):
"""
Overwrite base method
"""
self.htmllist += [Html('sup')]
def end_superscript(self):
"""
Overwrite base method
"""
self.__reduce_list()
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
        Convenience function to write a styledtext to the html doc.
        styledtext : assumed to be a StyledText object
        format : 0 = Flowed, 1 = Preformatted
        style_name : name of the style to use for default presentation
        contains_html : bool; if True the backend does not check whether html
            is present, and the textdoc is free to handle it in its own way.
            E.g. a textdoc could remove all tags, or could make sure a link
            is clickable. HtmlDoc passes the html through unescaped, so it
            is embedded verbatim in the output.
        links : bool, make URLs clickable if True
"""
text = str(styledtext)
self.htmllist += [Html('div', id='grampsstylednote')]
if contains_html:
            # just dump the note out as it is; adding markup would be
            # dangerous, as it could destroy the embedded html
self.start_paragraph(style_name)
self.__write_text(text, markup=True, links=links)
self.end_paragraph()
else:
s_tags = styledtext.get_tags()
markuptext = self._backend.add_markup_from_styled(text, s_tags,
split='\n')
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
# we explicitly set _empty because start and end para do not seem
# to do a very good job at setting them
linenb = 1
# The code is tricky here, because we don't want to start a new para
# at the end of the last line if there is no newline there.
# Instead, we want to just end the current para.
for line in markuptext.split('\n'):
[line, sigcount] = process_spaces(line, format)
if sigcount == 0:
                if not inpara:
# needed for runs of three or more newlines
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
self.end_paragraph()
inpara = False
linenb = 1
else:
                if not inpara:
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
if linenb > 1:
self.htmllist[-1] += Html('br')
self.__write_text(line, markup=True, links=links)
self._empty = 0 # para is not empty
linenb += 1
        if inpara:
self.end_paragraph()
if sigcount == 0:
# if the last line was blank, then as well as outputting the previous para,
# which we have just done,
# we also output a new blank para
self.start_paragraph(style_name)
self._empty = 1 # para is empty
self.end_paragraph()
#end div element
self.__reduce_list()
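    # A worked example of the splitting above (behaviour inferred from the
    # loop, given here only as commentary): for flowed text "a\n\nb",
    # process_spaces reports a blank middle line (sigcount == 0), so "a" ends
    # its paragraph there and "b" starts a fresh one -- two <p> elements.
    # Within a paragraph, later non-blank lines are joined with <br/> tags.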
def add_media_object(self, name, pos, w_cm, h_cm, alt='', style_name=None, crop=None):
"""
Overwrite base method
"""
self._empty = 0
        size = int(max(w_cm, h_cm) * (150.0 / 2.54))  # cm to pixels at 150 dpi
refname = "is%s" % os.path.basename(name)
imdir = self._backend.datadirfull()
try:
resize_to_jpeg(name, imdir + os.sep + refname, size, size, crop=crop)
        except Exception:
            LOG.warning(_("Could not create jpeg version of image %(name)s") %
                        {'name': name})
return
        if len(alt):
            # callers may pass alt as a list of lines; join them for display
            alt = '<br />'.join(alt)
        if pos not in ["right", "left"]:
            if len(alt):
                self.htmllist[-1] += Html('div') + (
                    Html('img', src=imdir + os.sep + refname,
                         border='0', alt=alt),
                    Html('p', class_="DDR-Caption") + alt
                )
            else:
                self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
                                          border='0', alt=alt)
        else:
            if len(alt):
                self.htmllist[-1] += Html('div',
                        style_="float: %s; padding: 5px; margin: 0;" % pos) + (
                    Html('img', src=imdir + os.sep + refname,
                         border='0', alt=alt),
                    Html('p', class_="DDR-Caption") + alt
                )
            else:
                self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
                                          border='0', alt=alt, align=pos)
def page_break(self):
"""
overwrite base method so page break has no effect
"""
pass
def start_link(self, link):
"""
Starts a section to add a link. Link is a URI.
"""
self.htmllist += [Html('a', href=link, **self.__link_attrs)]
def stop_link(self):
"""
Stop a section of a link.
"""
self.__reduce_list()
def start_underline(self):
"""
Starts a section of underlining.
"""
self.htmllist += [Html('u')]
def stop_underline(self):
"""
Stop underlining.
"""
self.__reduce_list()
def set_link_attrs(self, attrs):
"""
        Set attributes/values for generated <a> elements. attrs is a dictionary, e.g.
{"style": "...", "class": "..."}
"""
self.__link_attrs = attrs
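
# A minimal usage sketch (assumptions: a populated StyleSheet named `styles`
# with a paragraph style "Normal" is available from the report framework;
# the output path is illustrative):
#
#     doc = HtmlDoc(styles, None)          # paper_style is unused for html
#     doc.open('/tmp/report.html')         # writes header + opening divs
#     doc.start_paragraph('Normal')
#     doc.write_text('Hello from Gramps')
#     doc.end_paragraph()
#     doc.close()                          # flushes html and support files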
|
arunkgupta/gramps
|
gramps/plugins/docgen/htmldoc.py
|
Python
|
gpl-2.0
| 22,637
|
[
"Brian"
] |
964c2c61c369afa2f298b0b1932fd1232cd62f48d04974e1ea63a611fcf8ca59
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorflow_io.experimental.filter"""
from tensorflow_io.python.experimental.filter_ops import ( # pylint: disable=unused-import
gaussian,
laplacian,
prewitt,
sobel,
gabor,
)
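
# A minimal usage sketch (hedged: assumes the tensorflow-io package is
# installed and that `gaussian` follows the common (input, ksize, sigma)
# filtering signature over a 4-D float image batch):
#
#     import tensorflow as tf
#     import tensorflow_io as tfio
#
#     images = tf.random.uniform([1, 64, 64, 3])          # [batch, h, w, c]
#     blurred = tfio.experimental.filter.gaussian(
#         images, ksize=[5, 5], sigma=[1.0, 1.0])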
|
tensorflow/io
|
tensorflow_io/python/api/experimental/filter.py
|
Python
|
apache-2.0
| 888
|
[
"Gaussian"
] |
58a25ef3d756d5bd14fbf4d5b685f87a85df8f1d97351bd35cba54134915b11d
|