| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
'''
A module for storing meta data of a (reference) genome
Created on Nov 5, 2012
@author: Shunping Huang
'''
import os
import tempfile
import xml.etree.ElementTree as ET
from modtools import alias
__all__ = ['defaultXML', 'MetaData']
mm9_xml = '''
<genome>
<name>mm9</name>
<alias>NCBI37</alias>
<alias>NCBI Build 37</alias>
<alias>MGSCv37</alias>
<source>
<file><url>ftp://ftp-mouse.sanger.ac.uk/ref/NCBIM37_um.fa</url></file>
</source>
<source>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr1.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr2.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr3.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr4.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr5.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr6.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr7.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr8.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr9.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr10.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr11.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr12.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr13.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr14.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr15.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr16.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr17.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr18.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chr19.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chrX.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chrY.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm9/chromosomes/chrM.fa.gz</url></file>
</source>
<source>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr1.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr2.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr3.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr4.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr5.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr6.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr7.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr8.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr9.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr10.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr11.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr12.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr13.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr14.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr15.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr16.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr17.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr18.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chr19.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chrX.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chrY.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.37.1/Assembled_chromosomes/mm_ref_chrM.fa.gz</url></file>
</source>
<chromosome><name>1</name> <alias>chr1</alias> <length>197195432</length></chromosome>
<chromosome><name>2</name> <alias>chr2</alias> <length>181748087</length></chromosome>
<chromosome><name>3</name> <alias>chr3</alias> <length>159599783</length></chromosome>
<chromosome><name>4</name> <alias>chr4</alias> <length>155630120</length></chromosome>
<chromosome><name>5</name> <alias>chr5</alias> <length>152537259</length></chromosome>
<chromosome><name>6</name> <alias>chr6</alias> <length>149517037</length></chromosome>
<chromosome><name>7</name> <alias>chr7</alias> <length>152524553</length></chromosome>
<chromosome><name>8</name> <alias>chr8</alias> <length>131738871</length></chromosome>
<chromosome><name>9</name> <alias>chr9</alias> <length>124076172</length></chromosome>
<chromosome><name>10</name><alias>chr10</alias><length>129993255</length></chromosome>
<chromosome><name>11</name><alias>chr11</alias><length>121843856</length></chromosome>
<chromosome><name>12</name><alias>chr12</alias><length>121257530</length></chromosome>
<chromosome><name>13</name><alias>chr13</alias><length>120284312</length></chromosome>
<chromosome><name>14</name><alias>chr14</alias><length>125194864</length></chromosome>
<chromosome><name>15</name><alias>chr15</alias><length>103494974</length></chromosome>
<chromosome><name>16</name><alias>chr16</alias> <length>98319150</length></chromosome>
<chromosome><name>17</name><alias>chr17</alias> <length>95272651</length></chromosome>
<chromosome><name>18</name><alias>chr18</alias> <length>90772031</length></chromosome>
<chromosome><name>19</name><alias>chr19</alias> <length>61342430</length></chromosome>
<chromosome><name>X</name><alias>chrX</alias> <length>166650296</length></chromosome>
<chromosome><name>Y</name><alias>chrY</alias> <length>15902555</length></chromosome>
<chromosome><name>M</name><alias>chrM</alias><alias>MT</alias><alias>chrMT</alias><length>16299</length></chromosome>
</genome>
'''
mm10_xml = '''
<genome>
<name>mm10</name>
<alias>GRCm38</alias>
<alias>Genome Reference Consortium Mouse Build 38</alias>
<source>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr1.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr2.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr3.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr4.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr5.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr6.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr7.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr8.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr9.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr10.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr11.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr12.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr13.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr14.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr15.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr16.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr17.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr18.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chr19.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chrX.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chrY.fa.gz</url></file>
<file><url>http://hgdownload.cse.ucsc.edu/goldenPath/mm10/chromosomes/chrM.fa.gz</url></file>
</source>
<source>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr1.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr2.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr3.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr4.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr5.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr6.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr7.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr8.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr9.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr10.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr11.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr12.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr13.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr14.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr15.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr16.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr17.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr18.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chr19.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chrX.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chrY.fa.gz</url></file>
<file><url>ftp://ftp.ncbi.nih.gov/genomes/M_musculus/ARCHIVE/BUILD.38.1/Assembled_chromosomes/seq/mm_ref_GRCm38_chrM.fa.gz</url></file>
</source>
<chromosome><name>1</name> <alias>chr1</alias> <length>195471971</length></chromosome>
<chromosome><name>2</name> <alias>chr2</alias> <length>182113224</length></chromosome>
<chromosome><name>3</name> <alias>chr3</alias> <length>160039680</length></chromosome>
<chromosome><name>4</name> <alias>chr4</alias> <length>156508116</length></chromosome>
<chromosome><name>5</name> <alias>chr5</alias> <length>151834684</length></chromosome>
<chromosome><name>6</name> <alias>chr6</alias> <length>149736546</length></chromosome>
<chromosome><name>7</name> <alias>chr7</alias> <length>145441459</length></chromosome>
<chromosome><name>8</name> <alias>chr8</alias> <length>129401213</length></chromosome>
<chromosome><name>9</name> <alias>chr9</alias> <length>124595110</length></chromosome>
<chromosome><name>10</name><alias>chr10</alias><length>130694993</length></chromosome>
<chromosome><name>11</name><alias>chr11</alias><length>122082543</length></chromosome>
<chromosome><name>12</name><alias>chr12</alias><length>120129022</length></chromosome>
<chromosome><name>13</name><alias>chr13</alias><length>120421639</length></chromosome>
<chromosome><name>14</name><alias>chr14</alias><length>124902244</length></chromosome>
<chromosome><name>15</name><alias>chr15</alias><length>104043685</length></chromosome>
<chromosome><name>16</name><alias>chr16</alias> <length>98207768</length></chromosome>
<chromosome><name>17</name><alias>chr17</alias> <length>94987271</length></chromosome>
<chromosome><name>18</name><alias>chr18</alias> <length>90702639</length></chromosome>
<chromosome><name>19</name><alias>chr19</alias> <length>61431566</length></chromosome>
<chromosome><name>X</name><alias>chrX</alias> <length>171031299</length></chromosome>
<chromosome><name>Y</name><alias>chrY</alias> <length>91744698</length></chromosome>
<chromosome><name>M</name><alias>chrM</alias><alias>MT</alias><alias>chrMT</alias><length>16299</length></chromosome>
</genome>
'''
defaultXML = {'mm9': mm9_xml, 'mm10': mm10_xml}
#open('mm9.xml','wb').write(mm9_xml)
#mm9tree = ET.ElementTree(file='mm9.xml')
#mm9genome = mm9tree.getroot()
#chroms = mm9genome.findall('chromosome')
#mm9len = dict()
#for chrom in chroms:
# mm9len[chrom.find('name').text] = int(chrom.find('length').text)
##mm9tree.write('mm9.out.xml')
#
#open('mm10.xml','wb').write(mm10_xml)
#mm10tree = ET.ElementTree(file='mm10.xml')
#mm10genome = mm10tree.getroot()
#chroms = mm10genome.findall('chromosome')
##mm10tree.write('mm10.out.xml')
#
#mm10len = dict()
#for chrom in chroms:
# mm10len[chrom.find('name').text] = int(chrom.find('length').text)
#
#for chrom in sorted(mm9len.keys()):
# print chrom, mm9len[chrom], mm10len[chrom], mm10len[chrom]-mm9len[chrom], (mm10len[chrom]-mm9len[chrom])*100.0/mm9len[chrom]
class MetaData:
    def __init__(self, name=None, fileName=None):
if name is not None:
if name in defaultXML.keys():
self.load(name)
elif os.path.isfile(name+'.xml'):
self.loadFromFile(name+'.xml')
else:
raise ValueError("Cannot find meta data for '%s'" % name)
        elif fileName is not None:
            self.loadFromFile(fileName)
        else:
            raise ValueError('Either a genome name or an XML file name'
                             ' is required')
def load(self, name):
'''Load a default genome'''
        # TODO: Remove this temporary file on exit.
        fd, tmpName = tempfile.mkstemp('.xml')
        with os.fdopen(fd, 'w') as f:
            f.write(defaultXML[name])
self.loadFromFile(tmpName)
def loadFromFile(self, fileName):
'''Load meta data from an external XML file'''
tree = ET.ElementTree(file=fileName)
root = tree.getroot()
self.chromNames = [chrom.find('name').text for chrom in root.findall('chromosome')]
self.chromLengths = dict([(chrom.find('name').text,
int(chrom.find('length').text))
for chrom in root.findall('chromosome')])
# print(self.chromNames)
# print(self.chromLengths)
chromClasses = []
for chrom in root.findall('chromosome'):
chromClasses.append([])
chromClasses[-1].append(chrom.find('name').text)
for tag in chrom.findall('alias'):
chromClasses[-1].append(tag.text)
# print(chromClasses)
self.chromAliases = alias.Alias(chromClasses)
def getChromNames(self):
return self.chromNames
def getChromAliases(self):
return self.chromAliases
def getChromLengths(self):
return self.chromLengths
def getChromLength(self, chrom):
basicName = self.chromAliases.getBasicName(chrom)
return self.chromLengths.get(basicName, None)
def verify(self, chrom, fastaFileName):
# TODO: examine a fasta file to see if it matches the metadata.
        # For now, only the chromosome lengths need to be checked.
#
# Please note that the fastaFile may contain one or more chromosomes,
# and you need to check all chromosomes in it.
#
# Use pysam to open/index/read fasta file, such as:
# pysam.Fastafile
# pysam.faidx
# fasta.fetch
pass
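# A minimal sketch of the length check that MetaData.verify() is meant to
# perform, assuming pysam is installed. The helper name and the exact
# pysam calls shown here are illustrative, not the author's implementation.
def _sketch_verify_lengths(metadata, fastaFileName):
    import pysam
    fasta = pysam.Fastafile(fastaFileName)  # builds a .fai index if needed
    # compare every sequence in the fasta against the metadata lengths
    for ref, length in zip(fasta.references, fasta.lengths):
        expected = metadata.getChromLength(ref)
        if expected is not None and expected != length:
            raise ValueError("Length mismatch for '%s': expected %d, got %d"
                             % (ref, expected, length))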
if __name__ == '__main__':
genome = MetaData('mm9')
print(genome.getChromLength('chr1'))
| andrewparkermorgan/lapels | modtools/metadata.py | Python | mit | 18,089 | ["pysam"] | 81c8fba34d177c4226898b6da659d9b1865c9c1771933226f9acea1e11620a18 |
#!/usr/bin/env python
from collections import defaultdict
import numpy as np
from scipy.interpolate import LSQBivariateSpline
######### ######### ######### ######### ######### ######### ######### #########
######### Flat Field models #########
######### ######### ######### ######### ######### ######### ######### #########
class NullFlatField(object):
def __init__(self,*args):
return
def fit(self,*args,**kwargs):
return
def __call__(self,x,y):
return np.zeros_like(x)
class SplineFlatField(object):
def __init__(self,nx,ny):
self.nx = nx
self.ny = ny
# XXX hardcoding in the knots and spline order for now
self.tx = [0,nx/2,nx]
self.ty = [0,ny/2,ny]
self.kx = 3
self.ky = 3
self.splineFit = lambda x,y: np.zeros_like(x)
def fit(self,x,y,f,ivar=None):
self.splineFit = LSQBivariateSpline(x,y,f,self.tx,self.ty,w=ivar,
kx=self.kx,ky=self.ky)
def __call__(self,x,y):
return np.array( [ self.splineFit(_x,_y).squeeze()
for _x,_y in zip(x,y) ] )
def make_image(self,res=1):
x = np.arange(0,self.nx,res)
y = np.arange(0,self.ny,res)
return self.splineFit(x,y).transpose()
class FlatFieldSet(object):
def __init__(self,shape):
self.shape = shape
self.flatfields = []
def get_shape(self):
return self.shape
def __call__(self,indices,x,y):
rv = []
ii = np.ravel_multi_index(indices,self.shape)
for i,flat in enumerate(self.flatfields):
jj = np.where(ii==i)[0]
if len(jj)>0:
rv.append(flat(x[jj],y[jj]))
return np.concatenate(rv)
def __iter__(self):
for ff in self.flatfields:
yield ff
def init_flatfields(shape,nx,ny,method='spline',**kwargs):
flatfields = FlatFieldSet(shape)
if method=='spline':
generator = SplineFlatField
elif method=='array':
        generator = lambda nx,ny,**kwargs: np.zeros((ny,nx))
elif method=='null':
generator = NullFlatField
for i in range(np.product(shape)):
flatfields.flatfields.append(generator(nx,ny,**kwargs))
return flatfields
######### ######### ######### ######### ######### ######### ######### #########
######### Ubercalibration algorithm #########
######### ######### ######### ######### ######### ######### ######### #########
class CalibrationObject(object):
def __init__(self,mags,errs,errMin=0.01):
'''CalibrationObject(mags,errs)
An object (star or galaxy) with multiple observations that can be
used for relative photometric calibration. The object is defined
by instrumental magnitudes and errors at each epoch of observation,
the zeropoint (a) and airmass (k) terms corresponding to that
observation, the time of observation (t), and the flatfield
for that observation.
Each term is defined by a set of indices into the master list
and is defined for an object by the set_TERM_indices() method.
Not defining the term indices means that the term will be
ignored.
INPUT:
mags,errs: n-element vectors containing instrumental magnitudes
and errors, measured in ADU
'''
mask = errs <= 0
self.mags = np.ma.masked_array(mags,mask)
self.ivars = np.ma.masked_array(np.clip(errs,errMin,np.inf)**-2,mask)
self.nobs = len(self.mags)
self.a_indices = None
self.k_indices = None
self.t_indices = None
self.x_indices = None
self.flat_indices = None
self.xpos = None
self.ypos = None
def set_xy(self,x,y):
self.xpos = x
self.ypos = y
def set_a_indices(self,a_indices):
self.a_indices = a_indices
def set_k_indices(self,k_indices):
self.k_indices = k_indices
def set_t_indices(self,t_indices):
self.t_indices = t_indices
def set_x_indices(self,x_indices):
self.x_indices = x_indices
def set_flat_indices(self,flat_indices):
self.flat_indices = flat_indices
def set_reference_mag(self,refMag):
self.refMag = refMag
def get_numobs(self):
return self.nobs
def get_instrumental_mags(self):
return self.mags,self.ivars
def get_term_indices(self):
return (self.a_indices,self.k_indices,
self.t_indices,self.x_indices,self.flat_indices)
def get_xy(self):
return (self.xpos,self.ypos)
def update_mask(self,mask):
self.mags.mask |= mask
self.ivars.mask |= mask
class CalibrationObjectSet(object):
def __init__(self,aTerms,kTerms,tVals,airmasses,flatfields,**kwargs):
fit_a = kwargs.get('fit_a',True)
fit_k = kwargs.get('fit_k',True)
fit_dkdt = kwargs.get('fit_dkdt',False)
fit_flat = kwargs.get('fit_flat',False)
dTerms = np.array([])
fTerms = np.array([])
if kwargs.get('flat_poly2d',False):
fTerms = kwargs.get('flat_poly2d')
elif kwargs.get('flat_iteratedfit',False):
pass
#
self.objs = []
self.tVals = tVals
self.airmasses = airmasses
self.flatfields = flatfields
self.nobs = 0
self.params = {
'a':{ 'fit':fit_a, 'terms':aTerms, 'num':aTerms.size },
'k':{ 'fit':fit_k, 'terms':kTerms, 'num':kTerms.size },
'dkdt':{ 'fit':fit_dkdt, 'terms':dTerms, 'num':dTerms.size },
'flat':{ 'fit':fit_flat, 'terms':fTerms, 'num':fTerms.size },
}
self.npar = np.array([self.params[paramName]['num']
for paramName in ['a','k','dkdt','flat']
if self.params[paramName]['fit']])
def add_object(self,calobj):
self.objs.append(calobj)
self.nobs += calobj.get_numobs()
def set_fixed_dkdt(self,dkdt):
self.params['dkdt']['terms'] = np.array([dkdt,])
def get_object_phot(self,obj,returnBoth=False):
ai,ki,ti,xi,fi = obj.get_term_indices()
m_inst,ivar_inst = obj.get_instrumental_mags()
a = self.params['a']['terms'][ai]
k = self.params['k']['terms'][ki]
x = self.get_airmasses(xi)
dt = self.get_obstimes(ti)
dk_dt = self.get_terms('dkdt',0) # using a fixed value
flatfield = self.get_flatfields(fi,*obj.get_xy())
m_cal = m_inst + a - (k + dk_dt*dt)*x + flatfield
if returnBoth:
return m_cal,1/np.sqrt(ivar_inst),m_inst
else:
return m_cal,1/np.sqrt(ivar_inst)
def num_params(self):
return np.sum(self.npar)
def num_objects(self):
return len(self.objs)
def num_observations(self):
return self.nobs
def get_terms(self,p,indices):
if self.params[p]['terms'] is None:
return 0
else:
return self.params[p]['terms'][indices]
def get_obstimes(self,t_indices):
return self.tVals[t_indices]
def get_airmasses(self,x_indices):
return self.airmasses[x_indices]
def get_flatfields(self,flat_indices,x,y):
return self.flatfields(flat_indices,x,y)
def update_params(self,par):
i0 = 0
for p in ['a','k','dkdt','flat']:
nterms = self.params[p]['num']
if self.params[p]['fit']:
shape = self.params[p]['terms'].shape
self.params[p]['terms'].data[:] = \
par[i0:i0+nterms].reshape(shape)
i0 += nterms
def update_flatfields(self):
resv = defaultdict(list)
for obj in self.objs:
mag,ivar = self.get_object_phot(obj) # actually returns err
_,_,_,_,fi = obj.get_term_indices()
x,y = obj.get_xy()
ivar[ivar>0] **= -2 # convert to inverse variance
ii = np.ravel_multi_index(fi,self.flatfields.get_shape())
for i,flat in enumerate(self.flatfields):
# XXX should assume masking is correct here and be cleaner?
jj = np.where((ii==i) & (ivar>0) & ~mag.mask)[0]
if len(jj) > 0:
# XXX refmag should be optional, default is just offset
# to weighted mean mag
dmag = obj.refMag - mag[jj]
resv[i].append((x[jj],y[jj],dmag,ivar[jj]))
for i,flat in enumerate(self.flatfields):
resarr = np.hstack(resv[i])
flat.fit(*resarr)
def __iter__(self):
for obj in self.objs:
yield obj
def parameter_indices(self,paramName,indices):
if not self.params[paramName]['fit']:
raise ValueError
i0 = 0
for p in ['a','k','dkdt','flat']:
if paramName == p:
pshape = self.params[p]['terms'].shape
if len(pshape)==1:
par_ii = i0 + np.asarray(indices)
else:
par_ii = i0 + np.ravel_multi_index(indices,pshape)
break
else:
i0 += self.params[p]['num']
return np.array(par_ii)
def ubercal_solve(calset,**kwargs):
'''Find the best fit parameter values by solving the least squared problem
given in Padmanabhan et al. (2008) eq. 14.
Returns the updated parameters.
'''
minNobs = kwargs.get('minNobs',1)
bigmatrix = kwargs.get('bigmatrix',False)
#rmsFloor = kwargs.get('rmsFloor',0.02)
#
npar = calset.num_params()
nobs = calset.num_observations()
if bigmatrix:
A = np.zeros((nobs,npar))
b = np.zeros(nobs)
cinv = np.zeros(nobs)
i1 = 0
else:
atcinvb = np.zeros(npar)
atcinva = np.zeros((npar,npar))
# iterate over all objects (stars)
for n,obj in enumerate(calset):
if ((n+1)%50)==0:
print 'star #',n+1,' out of ',calset.num_objects()
# collect all observations of this object and the associated
# calibration terms
m_inst,ivar_inst = obj.get_instrumental_mags()
a_indices,k_indices,t_indices,x_indices,flat_indices = \
obj.get_term_indices()
a = calset.get_terms('a',a_indices)
k = calset.get_terms('k',k_indices)
x = calset.get_airmasses(x_indices)
dk_dt = calset.get_terms('dkdt',0) # using a fixed value
dt = calset.get_obstimes(t_indices)
flatfield = calset.get_flatfields(flat_indices,*obj.get_xy())
# construct indices into the parameter axis
nobs_i = obj.get_numobs()
par_a_indx = calset.parameter_indices('a',a_indices)
par_k_indx = calset.parameter_indices('k',k_indices)
ii = np.repeat(np.arange(nobs_i),nobs_i)
jj = np.tile(np.arange(nobs_i),nobs_i)
ai,aj = par_a_indx[ii],par_a_indx[jj]
ki,kj = par_k_indx[ii],par_k_indx[jj]
#
# construct << A^T * C^-1 * B >>
#
# update instrumental magnitude based on current values for fixed
# parameters and for objects with poorly defined free parameters
# XXX need to check here that masked arrays are being handled properly
a_bad = np.ma.masked_array(a.data,~a.mask).filled(0)
k_bad = np.ma.masked_array(k.data,~k.mask).filled(0)
m_inst.data[:] += a_bad - (k_bad + dk_dt*dt)*x + flatfield
# normalized inverse variance weights
w = ivar_inst / np.sum(ivar_inst)
# inverse-variance-weighted mean instrumental magnitude
m_mean = np.sum(w*m_inst)
# if requested, construct the large matrices instead
if bigmatrix:
i2 = i1 + nobs_i
_ii = np.arange(i1,i2) # indexes into rows of A (observations)
b[i1:i2] = -(m_inst - m_mean)
cinv[i1:i2] = ivar_inst
A[_ii,par_a_indx] = 1
A[_ii,par_k_indx] = -x
np.add.at( A[i1:i2], (ii,aj), -w[jj] )
np.add.at( A[i1:i2], (ii,kj), (w*x)[jj] )
i1 += nobs_i
continue
# b column vector (eq. 13)
b = -(m_inst - m_mean)*ivar_inst
wb = np.sum(b)*w
np.add.at( atcinvb, par_a_indx, b-wb )
np.add.at( atcinvb, par_k_indx, -(b-wb)*x )
#
# construct << A^T * C^-1 * A >>
#
at_sub = np.eye(nobs_i) - np.tile(w,(nobs_i,1))
wt = np.dot(at_sub,np.transpose(ivar_inst*at_sub))
#
np.add.at( atcinva, (ai,aj), wt[ii,jj] )
np.add.at( atcinva, (ai,kj), -wt[ii,jj]*x[jj] )
np.add.at( atcinva, (ki,aj), -wt[ii,jj]*x[ii] )
np.add.at( atcinva, (ki,kj), wt[ii,jj]*x[ii]*x[jj] )
if bigmatrix:
atcinvb = np.dot(A.T,cinv*b)
# should use scipy.sparse here but getting a warning
atcinva = np.dot(np.dot(A.T,np.diag(cinv)),A)
# Solve for p
p, _, _, _ = np.linalg.lstsq(atcinva,atcinvb)
return p
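# A minimal, self-contained sketch of the normal-equation solve that
# ubercal_solve performs (cf. its `bigmatrix` branch). The tiny design
# matrix A (one zeropoint column, one airmass column), residual vector b,
# and inverse variances cinv are illustrative stand-ins, not outputs of
# the code above.
if __name__ == '__main__':
    A = np.array([[1., -1.2], [1., -1.0], [1., -1.5]])
    b = np.array([0.02, -0.01, 0.03])
    cinv = np.array([100., 400., 100.])
    atcinva = np.dot(A.T * cinv, A)   # A^T C^-1 A
    atcinvb = np.dot(A.T, cinv * b)   # A^T C^-1 b
    p = np.linalg.lstsq(atcinva, atcinvb)[0]
    print 'best-fit (a, k):', p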
| imcgreer/uberpy | uberpy/ubercal.py | Python | bsd-3-clause | 11,412 | ["Galaxy"] | cda49ba46f26954e09096fc0d8d9cf432d765b3bfdf11c559f2e28ce9c29917c |
# This file is part of eventmq.
#
# eventmq is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# eventmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eventmq. If not, see <http://www.gnu.org/licenses/>.
"""
log module for eventmq
this needs so much work.
"""
import errno
import logging
import os
import time
import zmq
import zmq.log.handlers
FORMAT_STANDARD = logging.Formatter(
'%(asctime)s - %(name)s %(levelname)s - %(message)s',
datefmt='%Y-%m-%dT%H:%M:%S%z')
FORMAT_NAMELESS = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%dT%H:%M:%S%z')
class PUBHandler(zmq.log.handlers.PUBHandler):
    """Publish log records over a ZMQ PUB socket; thin subclass of
    :class:`zmq.log.handlers.PUBHandler`."""
    pass
class handlers(object):
"""
log handlers
PUBLISH_HANDLER - blast logs through a pub mechanism
    STREAM_HANDLER - logs to stdout/stderr
    FILE_HANDLER - logs to a file
    """
PUBLISH_HANDLER = PUBHandler
STREAM_HANDLER = logging.StreamHandler
FILE_HANDLER = logging.FileHandler
def setup_logger(base_name, formatter=FORMAT_STANDARD,
handler=handlers.STREAM_HANDLER):
logger = logging.getLogger(base_name)
logger.setLevel(logging.DEBUG)
# remove handlers we don't want
# for h in logger.handlers:
# logger.removeHandler(h)
if handler == handlers.PUBLISH_HANDLER:
_handler_sock = zmq.Context.instance().socket(zmq.PUB)
_handler_sock.bind('tcp://127.0.0.1:33445')
        # give subscribers a moment to connect before logs are published
        # (ZMQ slow-joiner workaround); `time` is already imported above
        time.sleep(1)
handler = handler(_handler_sock)
handler.root_topic = base_name
else:
handler = handler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def setup_wal_logger(base_name, filename, handler=handlers.FILE_HANDLER):
"""
Write-ahead Log for replaying messages. Should only contain
commands on the data path (REQUEST, SCHEDULE, UNSCHEDULE)
"""
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
    # touch the file so it exists before the handler opens it
    with open(filename, "a+"):
        pass
wal = logging.getLogger(base_name)
wal_handler = handler(filename)
formatter = logging.Formatter('%(asctime)s %(message)s')
formatter.converter = time.gmtime
wal_handler.setFormatter(formatter)
wal.addHandler(wal_handler)
wal.setLevel(logging.INFO)
return wal
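# A minimal usage sketch (assumed, not part of the original module): build
# a stream logger and a write-ahead log under a temporary directory.
if __name__ == '__main__':
    import tempfile
    logger = setup_logger('eventmq.example')
    logger.info('hello from the stream handler')
    wal = setup_wal_logger(
        'eventmq.wal', os.path.join(tempfile.mkdtemp(), 'eventmq.wal'))
    wal.info('REQUEST example-message-id')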
| com4/eventmq | eventmq/log.py | Python | lgpl-2.1 | 2,934 | ["BLAST"] | f1dd766c68a8019d7828d2df308c9386ddba5af8bbc24eaaebde19e4316c3360 |
# coding: utf-8
# Copyright (c) 2015,Vienna University of Technology,
# Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology,
# Department of Geodesy and Geoinformation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Tests for the validation framework
Created on Mon Jul 6 12:49:07 2015
'''
import os
import tempfile
import netCDF4 as nc
import numpy as np
import numpy.testing as nptest
import pytesmo.validation_framework.temporal_matchers as temporal_matchers
import pytesmo.validation_framework.metric_calculators as metrics_calculators
from pytesmo.validation_framework.results_manager import netcdf_results_manager
from datetime import datetime
from pytesmo.io.sat.ascat import AscatH25_SSM
from pytesmo.io.ismn.interface import ISMN_Interface
from pytesmo.validation_framework.validation import Validation
class DataPreparation(object):
"""
Class for preparing the data before validation.
"""
@staticmethod
def prep_reference(reference):
"""
Static method used to prepare the reference dataset (ISMN).
Parameters
----------
reference : pandas.DataFrame
ISMN data.
Returns
-------
reference : pandas.DataFrame
Masked reference.
"""
return reference
@staticmethod
def prep_other(other, other_name,
mask_snow=80,
mask_frozen=80,
mask_ssf=[0, 1]):
"""
Static method used to prepare the other datasets (ASCAT).
Parameters
----------
other : pandas.DataFrame
Containing at least the fields: sm, frozen_prob, snow_prob, ssf.
        other_name : string
            Name of the other dataset; the masking below is applied when
            it is 'ASCAT'.
mask_snow : int, optional
If set, all the observations with snow probability > mask_snow
are removed from the result. Default: 80.
mask_frozen : int, optional
If set, all the observations with frozen probability > mask_frozen
are removed from the result. Default: 80.
mask_ssf : list, optional
If set, all the observations with ssf != mask_ssf are removed from
the result. Default: [0, 1].
Returns
-------
        other : pandas.DataFrame
            Masked ASCAT data.
"""
if other_name == 'ASCAT':
# mask frozen
if mask_frozen is not None:
other = other[other['frozen_prob'] < mask_frozen]
# mask snow
if mask_snow is not None:
other = other[other['snow_prob'] < mask_snow]
# mask ssf
if mask_ssf is not None:
other = other[(other['ssf'] == mask_ssf[0]) |
(other['ssf'] == mask_ssf[1])]
return other
def test_ascat_ismn_validation():
"""
Test processing framework with some ISMN and ASCAT sample data
"""
ascat_data_folder = os.path.join(os.path.dirname(__file__), 'test-data',
'sat', 'ascat', 'netcdf', '55R22')
ascat_grid_folder = os.path.join(os.path.dirname(__file__), 'test-data',
'sat', 'ascat', 'netcdf', 'grid')
ascat_reader = AscatH25_SSM(ascat_data_folder, ascat_grid_folder)
ascat_reader.read_bulk = True
ascat_reader._load_grid_info()
# Initialize ISMN reader
ismn_data_folder = os.path.join(os.path.dirname(__file__), 'test-data',
'ismn', 'multinetwork', 'header_values')
ismn_reader = ISMN_Interface(ismn_data_folder)
jobs = []
ids = ismn_reader.get_dataset_ids(
variable='soil moisture',
min_depth=0,
max_depth=0.1)
for idx in ids:
metadata = ismn_reader.metadata[idx]
jobs.append((idx, metadata['longitude'], metadata['latitude']))
# Create the variable ***save_path*** which is a string representing the
# path where the results will be saved. **DO NOT CHANGE** the name
# ***save_path*** because it will be searched during the parallel
# processing!
save_path = tempfile.mkdtemp()
# Create the validation object.
datasets = {
'ISMN': {
'class': ismn_reader, 'columns': [
'soil moisture'
], 'type': 'reference', 'args': [], 'kwargs': {}
},
'ASCAT': {
            'class': ascat_reader, 'columns': ['sm'],
            'type': 'other', 'args': [], 'kwargs': {},
            'grids_compatible': False, 'use_lut': False,
            'lut_max_dist': 30000
}
}
period = [datetime(2007, 1, 1), datetime(2014, 12, 31)]
process = Validation(
datasets=datasets,
data_prep=DataPreparation(),
temporal_matcher=temporal_matchers.BasicTemporalMatching(
window=1 / 24.0,
reverse=True),
scaling='lin_cdf_match',
scale_to_other=True,
metrics_calculator=metrics_calculators.BasicMetrics(),
period=period,
cell_based_jobs=False)
for job in jobs:
results = process.calc(job)
netcdf_results_manager(results, save_path)
results_fname = os.path.join(
save_path, 'ISMN.soil moisture_with_ASCAT.sm.nc')
vars_should = [u'n_obs', u'tau', u'gpi', u'RMSD', u'lon', u'p_tau',
u'BIAS', u'p_rho', u'rho', u'lat', u'R', u'p_R']
n_obs_should = [360, 385, 1644, 1881, 1927, 479, 140, 251]
rho_should = np.array([0.54618734, 0.71739876, 0.62089276, 0.53246528,
0.30299741, 0.69647062, 0.840593, 0.73913699],
dtype=np.float32)
rmsd_should = np.array([11.53626347, 7.54565048, 17.45193481, 21.19371414,
14.24668026, 14.27493, 13.173215, 12.59192371],
dtype=np.float32)
with nc.Dataset(results_fname) as results:
assert sorted(results.variables.keys()) == sorted(vars_should)
assert sorted(results.variables['n_obs'][:].tolist()) == sorted(
n_obs_should)
nptest.assert_allclose(sorted(rho_should),
sorted(results.variables['rho'][:]))
nptest.assert_allclose(sorted(rmsd_should),
sorted(results.variables['RMSD'][:]))
| christophreimer/pytesmo | tests/test_validation.py | Python | bsd-3-clause | 7,851 | ["NetCDF"] | 5b5d3d4c3fd8aa198d360eddc7e063402b84c6cfe03d877ce5ada4cafb069881 |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_repositories')
@pass_context
@custom_exception
@json_output
def cli(ctx):
"""Get the list of all installed Tool Shed repositories on this Galaxy instance.
Output:
a list of dictionaries containing information about
repositories present in the Tool Shed.
For example::
[{'changeset_revision': '4afe13ac23b6',
'deleted': False,
'dist_to_shed': False,
'error_message': '',
'name': 'velvet_toolsuite',
'owner': 'edward-kirton',
'status': 'Installed'}]
.. versionchanged:: 0.4.1
Changed method name from ``get_tools`` to ``get_repositories`` to
better align with the Tool Shed concepts
.. seealso:: bioblend.galaxy.tools.get_tool_panel()
"""
return ctx.gi.toolshed.get_repositories()
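# Assumed CLI usage (the exact invocation depends on how parsec registers
# its command groups; shown for illustration only):
#     $ parsec toolshed get_repositories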
| galaxy-iuc/parsec | parsec/commands/toolshed/get_repositories.py | Python | apache-2.0 | 999 | ["Galaxy"] | 6f2f284daa4c68d87df1f908818c6e3a098b14012bbf4d06ccc0eb0167d40df4 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
This module provides an implementation of the Random Waypoint mobility model.
In this model ([JM96]_, [BMJ+98]_), a node first stops for some random *pause
time*. Then, the node randomly picks a point within the simulation area and
starts moving toward it with a constant, but randomly selected, speed that is
uniformly distributed between the *minimum* and *maximum speed* values. Upon
reaching the destination point (or waypoint), the node pauses again and then
moves toward a newly randomized point. (If the *pause time* is equal to zero,
this leads to continuous mobility.) The whole process is repeated again and
again until simulation ends. The speed and destination of each node are chosen
independently of other nodes.
.. [JM96] David B. Johnson and David A. Maltz. Dynamic Source Routing in Ad
Hoc Wireless Networks. In *Mobile Computing*, edited by Tomasz Imielinski
and Hank Korth, chapter 5, pp. 153--181. Kluwer Academic Publishers, 1996.
.. [BMJ+98] Josh Broch, David A. Maltz, David B. Johnson, Yih-Chun Hu, Jorjeta
Jetcheva. A Performance Comparison of Multi-hop Wireless Ad Hoc Network
Routing Protocols. In Proceedings of the *4th Annual ACM/IEEE International
Conference on Mobile Computing and Networking* (MobiCom 1998), pp. 85--97.
Dallas, Texas, United States, October 1998.
"""
from math import fabs, sqrt
from sim2net.mobility._mobility import Mobility
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
class RandomWaypoint(Mobility):
"""
This class implements the Random Waypoint mobility model, in which each
node moves along straight lines from one waypoint to another.
The waypoints are randomly picked within the simulation area. The nodes
may also have *pause times* when they reach waypoints, and their speeds are
selected at random between the *minimum* and *maximum speed* values. (All
random picks are uniformly distributed).
.. note::
The :meth:`get_current_position` method computes a position of a node
at the current *simulation step* (see: :mod:`sim2net._time`), so it is
presumed that the method is called at each step of the simulation.
"""
def __init__(self, area, time, initial_coordinates, pause_time=0.0):
"""
*Parameters*:
- **area**: an object representing the simulation area;
- **time**: a simulation time object of the
:class:`sim2net._time.Time` class;
- **initial_coordinates** (`list`): initial coordinates of all
nodes; each element of this parameter should be a tuple of two
coordinates: horizontal and vertical (respectively) of type
`float`;
- **pause_time** (`float`): a maximum value of the pause time in
the *simulation time* units (default: `0.0`, see also:
:mod:`sim2net._time`).
*Raises*:
        - **ValueError**: raised when the given value of the *area*, *time*
          or *initial_coordinates* parameter is `None`, or when the given
          value of the *pause_time* parameter is less than zero.
        (At the beginning, nodes' destination points are set equal to their
        initial coordinates passed via the *initial_coordinates* parameter.)
"""
if area is None:
raise ValueError('Parameter "area": a simulation area object' \
' expected but "None" value given!')
if time is None:
raise ValueError('Parameter "time": a time abstraction object' \
' expected but "None" value given!')
if initial_coordinates is None:
raise ValueError('Parameter "initial_coordinates": identifiers' \
' of nodes expected but "None" value given!')
super(RandomWaypoint, self).__init__(RandomWaypoint.__name__)
self._area = area
self._time = time
self._destinations = dict()
check_argument_type(RandomWaypoint.__name__, 'initial_coordinates',
list, initial_coordinates, self.logger)
check_argument_type(RandomWaypoint.__name__, 'pause_time', float,
pause_time, self.logger)
if pause_time < 0.0:
raise ValueError('Parameter "pause_time": a value of the pause' \
                             ' time cannot be less than zero but %f given!' \
% float(pause_time))
self._pause_time = float(pause_time)
# { node id:
# { 'destination': (horizontal coordinate, vertical coordinate),
# 'pause time' : time } }
for node_id in range(0, len(initial_coordinates)):
self._destinations[node_id] = dict()
self._destinations[node_id]['destination'] = \
initial_coordinates[node_id]
self._destinations[node_id]['pause time'] = None
        self.logger.debug('Destination points have been initialized for %d' \
' nodes' % len(self._destinations))
def _get_new_destination(self):
"""
Randomizes a new waypoint and returns its coordinates as a `tuple`.
"""
return (self.random_generator.uniform(self._area.ORIGIN[0],
self._area.width),
self.random_generator.uniform(self._area.ORIGIN[1],
self._area.height))
def _get_new_pause_time(self):
"""
Randomizes a new pause time and returns its value of type `float`.
"""
return self.random_generator.uniform(0.0, self._pause_time)
def _assign_new_destination(self, node_id, node_speed):
"""
Assigns a new destination point for a node of a given ID and picks its
new speed value. (See also: :meth:`_get_new_destination`)
*Parameters*:
- **node_id** (`int`): an identifier of the node;
- **node_speed**: an object representing the node's speed.
"""
self._destinations[node_id]['destination'] = \
self._get_new_destination()
node_speed.get_new()
if self.logger.isEnabledFor('DEBUG'):
msg = 'A new destination has been selected for the node #%d:' \
' (%f, %f) with the current speed equal to %f'
self.logger.debug(
msg % (node_id, self._destinations[node_id]['destination'][0],
self._destinations[node_id]['destination'][1],
fabs(node_speed.current)))
def _assign_new_pause_time(self, node_id):
"""
Assigns a new pause time for a node of a given ID and returns the
value. If the maximum pause time is set to `0`, `None` value is
assigned and returned.
*Parameters*:
- **node_id** (`int`): an identifier of the node.
*Returns*:
(`float`) a newly randomized pause time.
"""
if self._pause_time > 0:
pause_time = self._get_new_pause_time()
self._destinations[node_id]['pause time'] = pause_time
else:
pause_time = 0.0
self._destinations[node_id]['pause time'] = None
if self.logger.isEnabledFor('DEBUG'):
msg = 'The node #%d is now in its destination position (%f, %f)' \
' with the pause time equal to %f'
self.logger.debug(msg %
(node_id, self._destinations[node_id]['destination'][0],
self._destinations[node_id]['destination'][1], pause_time))
return self._destinations[node_id]['pause time']
def _parallel_trajectory(self, coordinate, destination, step_distance):
"""
Computes the current position of a node when one of its coordinates is
equal to the corresponding destination coordinate. In such a case, the
node moves on a straight line that is parallel to the horizontal or
vertical axis of the simulation area. (See also:
:meth:`_diagonal_trajectory`.)
*Parameters*:
- **coordinate** (`float`): a value of the previous node's
coordinate that is not equal to its corresponding destination
coordinate;
- **destination** (`float`): a value of the destination coordinate;
- **step_distance** (`float`): a distance that the node has moved
between the previous and current simulation steps.
*Returns*:
(`float`) a current value of the node's coordinate.
"""
if destination > coordinate \
and destination >= coordinate + step_distance:
return coordinate + step_distance
if destination < coordinate \
and destination < coordinate - step_distance:
return coordinate - step_distance
return destination
def _diagonal_trajectory(self, node_id, node_coordinates, step_distance):
"""
Computes the current position of a node if its trajectory is not
parallel to the horizontal or vertical axis of the simulation area.
(See also: :meth:`_parallel_trajectory`.)
*Parameters*:
- **node_id** (`int`): an identifier of the node;
- **node_coordinates** (`list`): values of the node's horizontal
and vertical coordinates at the previous simulation step.
- **step_distance** (`float`): a distance that the node has moved
between the previous and current simulation step.
*Returns*:
(`tuple`) current values of the node's horizontal and vertical
coordinates.
"""
horizontal_destination = self._destinations[node_id]['destination'][0]
vertical_destination = self._destinations[node_id]['destination'][1]
horizontal_distance = \
fabs(horizontal_destination - node_coordinates[0])
vertical_distance = fabs(vertical_destination - node_coordinates[1])
distance = \
sqrt(pow(horizontal_distance, 2.0) + pow(vertical_distance, 2.0))
if step_distance >= distance:
return (horizontal_destination, vertical_destination)
horizontal_coordinate = \
(horizontal_distance * step_distance) / distance
vertical_coordinate = \
(vertical_distance * horizontal_coordinate) / horizontal_distance
if node_coordinates[0] < horizontal_destination:
horizontal_coordinate = node_coordinates[0] + horizontal_coordinate
else:
horizontal_coordinate = node_coordinates[0] - horizontal_coordinate
if node_coordinates[1] < vertical_destination:
vertical_coordinate = node_coordinates[1] + vertical_coordinate
else:
vertical_coordinate = node_coordinates[1] - vertical_coordinate
return (horizontal_coordinate, vertical_coordinate)
def _step_move(self, node_id, node_speed, node_coordinates):
"""
Computes a node's position at the current simulation step. If its
trajectory is parallel to the horizontal or vertical axis of the
        simulation area, the :meth:`_parallel_trajectory` method is used,
otherwise the :meth:`_diagonal_trajectory` method is used.
*Parameters*:
- **node_id** (`int`): an identifier of the node;
- **node_speed**: an object representing the node's speed;
- **node_coordinates** (`list`): values of the node's horizontal
and vertical coordinates at the previous simulation step.
*Returns*:
(`tuple`) current values of the node's horizontal and vertical
coordinates.
"""
horizontal_destination = self._destinations[node_id]['destination'][0]
vertical_destination = self._destinations[node_id]['destination'][1]
if node_coordinates[0] == horizontal_destination \
and node_coordinates[1] == vertical_destination:
return (horizontal_destination, vertical_destination)
step_distance = fabs(node_speed.current) * self._time.simulation_period
if node_coordinates[0] == horizontal_destination:
return \
(horizontal_destination,
self._parallel_trajectory(
node_coordinates[1], vertical_destination, step_distance))
if node_coordinates[1] == vertical_destination:
return \
(self._parallel_trajectory(
node_coordinates[0], horizontal_destination,
step_distance),
vertical_destination)
return self._diagonal_trajectory(node_id, node_coordinates,
step_distance)
def _pause(self, node_id, node_coordinates):
"""
Decreases the current value of a node's pause time and returns the
result of type `float`, or `None` if the pause time has expired.
*Parameters*:
- **node_id** (`int`): an identifier of the node;
- **node_coordinates** (`list`): values of the node's horizontal
and vertical coordinates at the previous simulation step.
"""
self._destinations[node_id]['pause time'] -= \
self._time.simulation_period
if self._destinations[node_id]['pause time'] <= 0:
self._destinations[node_id]['pause time'] = None
else:
if __debug__ and self.logger.isEnabledFor('DEBUG'):
msg = 'The node #%d is still in its destination position' \
' (%f, %f) with the pause time equal to %f'
self.logger.debug(msg %
(node_id, node_coordinates[0], node_coordinates[1],
self._destinations[node_id]['pause time']))
return self._destinations[node_id]['pause time']
def get_current_position(self, node_id, node_speed, node_coordinates):
"""
Calculates and returns a node's position at the current simulation step
in accordance with the Random Waypoint mobility model.
A distance of the route traveled by the node, between the current and
previous simulation steps, is calculated as the product of the current
node's speed and the *simulation period* (see: :mod:`sim2net._time`
module). Therefore, it is assumed that this method is called at every
simulation step.
*Parameters*:
- **node_id** (`int`): an identifier of the node;
- **node_speed**: an object representing the node's speed;
- **node_coordinates** (`list`): values of the node's horizontal
and vertical coordinates at the previous simulation step.
*Returns*:
A tuple containing current values of the node's horizontal and
vertical coordinates.
"""
# pause time?
if self._destinations[node_id]['pause time'] is not None:
if self._pause(node_id, node_coordinates) is None:
self._assign_new_destination(node_id, node_speed)
return node_coordinates
# movement
coordinates = self._step_move(node_id, node_speed, node_coordinates)
assert 0 <= coordinates[0] <= self._area.width \
and 0 <= coordinates[1] <= self._area.height, \
'The new coordinates (%f, %f) exceed dimensions of the' \
' simulation area!' % coordinates
if (coordinates[0] == self._destinations[node_id]['destination'][0]
and
coordinates[1] == self._destinations[node_id]['destination'][1]):
# print "%.30f %.30f" % (coordinates[0], coordinates[1])
if self._assign_new_pause_time(node_id) is not None:
self._destinations[node_id]['destination'] = [None, None]
else:
self._assign_new_destination(node_id, node_speed)
elif __debug__ and self.logger.isEnabledFor('DEBUG'):
msg = 'The current position of the node #%d is (%f, %f) with the' \
' current speed equal to %f'
self.logger.debug(msg % (node_id, coordinates[0], coordinates[1],
fabs(node_speed.current)))
# print "%.30f %.30f" % (coordinates[0], coordinates[1])
return coordinates
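# A minimal, standalone sketch (not part of the original module) of the
# single-step displacement that _diagonal_trajectory computes: advance
# `speed * period` along the straight line toward the current waypoint.
if __name__ == '__main__':
    position, waypoint = (0.0, 0.0), (3.0, 4.0)
    speed, period = 2.0, 0.5  # one step covers 1.0 distance unit
    dx = waypoint[0] - position[0]
    dy = waypoint[1] - position[1]
    distance = sqrt(dx ** 2 + dy ** 2)
    step = min(speed * period, distance)  # never overshoot the waypoint
    print (position[0] + step * dx / distance,
           position[1] + step * dy / distance)  # -> (0.6, 0.8)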
| mkalewski/sim2net | sim2net/mobility/random_waypoint.py | Python | mit | 17,106 | ["VisIt"] | 1c9d797b934d0a198c6050c215d61aee4f079f609f6a90f9102dc4ed062d0ca0 |
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# Copyright (c) 2009-2010 Arista Networks, Inc.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""basic checker for Python code"""
import collections
import itertools
import sys
import re
import six
from six.moves import zip # pylint: disable=redefined-builtin
from logilab.common.ureports import Table
import astroid
import astroid.bases
from astroid import are_exclusive, InferenceError
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.utils import EmptyReport
from pylint.reporters import diff_string
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
clobber_in_except,
is_builtin_object,
is_inside_except,
overrides_a_method,
safe_infer,
get_argument_from_call,
has_known_bases,
NoSuchArgumentError,
is_import_error,
unimplemented_abstract_methods,
)
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$')
CLASS_ATTRIBUTE_RGX = re.compile(r'([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$')
# do not require a doc string on system methods
NO_REQUIRED_DOC_RGX = re.compile('__.*__')
REVERSED_METHODS = (('__getitem__', '__len__'),
('__reversed__', ))
PY33 = sys.version_info >= (3, 3)
PY3K = sys.version_info >= (3, 0)
BAD_FUNCTIONS = ['map', 'filter']
if sys.version_info < (3, 0):
BAD_FUNCTIONS.append('input')
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = set(('exempt', 'ignore'))
# A mapping from builtin-qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(['.'.join([astroid.bases.BUILTINS, x]) for x in ('set', 'dict', 'list')],
['set()', '{}', '[]'])
)
del re
def _redefines_import(node):
""" Detect that the given node (AssName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not is_import_error(current.parent):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.From, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(parent, (astroid.For, astroid.ListComp, astroid.SetComp,
astroid.DictComp, astroid.GenExpr)):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _loop_exits_early(loop):
"""Returns true if a loop has a break statement in its body."""
loop_nodes = (astroid.For, astroid.While)
# Loop over body explicitly to avoid matching break statements
# in orelse.
for child in loop.body:
if isinstance(child, loop_nodes):
# break statement may be in orelse of child loop.
# pylint: disable=superfluous-parens
for orelse in (child.orelse or ()):
for _ in orelse.nodes_of_class(astroid.Break, skip_klass=loop_nodes):
return True
continue
for _ in child.nodes_of_class(astroid.Break, skip_klass=loop_nodes):
return True
return False
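# Illustration (added comment): _loop_exits_early is True for the first loop
# below and False for the second, whose break only exits the inner loop:
#
#     while cond:              # True: the break can exit this loop
#         if found:
#             break
#
#     for x in xs:             # False: the break belongs to the inner loop
#         for y in ys:
#             break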
def _is_multi_naming_match(match, node_type, confidence):
return (match is not None and
match.lastgroup is not None and
match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != 'method' or confidence != INFERENCE_FAILURE))
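# Illustration (added comment): multi-naming regexes use one named group per
# accepted style, e.g.
#
#     (?P<snake>[a-z_][a-z0-9_]*)$|(?P<camel>_?[A-Z][a-zA-Z0-9]*)$
#
# match.lastgroup then reports which style matched, and leave_module later
# warns about the minority style within each name group.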
if sys.version_info < (3, 0):
PROPERTY_CLASSES = set(('__builtin__.property', 'abc.abstractproperty'))
else:
PROPERTY_CLASSES = set(('builtins.property', 'abc.abstractproperty'))
def _determine_function_name_type(node):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:returns: One of ('function', 'method', 'attr')
"""
if not node.is_method():
return 'function'
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if (isinstance(decorator, astroid.Name) or
(isinstance(decorator, astroid.Getattr) and
decorator.attrname == 'abstractproperty')):
infered = safe_infer(decorator)
if infered and infered.qname() in PROPERTY_CLASSES:
return 'attr'
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
elif (isinstance(decorator, astroid.Getattr) and
decorator.attrname in ('setter', 'deleter')):
return 'attr'
return 'method'
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ('module', 'class', 'method', 'function'):
try:
total = stats[node_type]
except KeyError:
raise EmptyReport()
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats['undocumented_'+node_type]
percent = (documented * 100.) / total
nice_stats[node_type]['percent_documented'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_documented'] = 'NC'
try:
percent = (stats['badname_'+node_type] * 100.) / total
nice_stats[node_type]['percent_badname'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_badname'] = 'NC'
lines = ('type', 'number', 'old number', 'difference',
'%documented', '%badname')
for node_type in ('module', 'class', 'method', 'function'):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = diff_string(old, new)
else:
old, diff_str = 'NC', 'NC'
lines += (node_type, str(new), str(old), diff_str,
nice_stats[node_type].get('percent_documented', '0'),
nice_stats[node_type].get('percent_badname', '0'))
sect.append(Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (isinstance(decorator, astroid.Getattr) and
getattr(decorator.expr, 'name', None) == node.name):
return True
return False
class _BasicChecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'basic'
class BasicErrorChecker(_BasicChecker):
msgs = {
'E0100': ('__init__ method is a generator',
'init-is-generator',
'Used when the special class method __init__ is turned into a '
'generator by a yield in its body.'),
'E0101': ('Explicit return in __init__',
'return-in-init',
'Used when the special class method __init__ has an explicit '
'return value.'),
'E0102': ('%s already defined line %s',
'function-redefined',
'Used when a function / class / method is redefined.'),
'E0103': ('%r not properly in loop',
'not-in-loop',
'Used when break or continue keywords are used outside a loop.'),
'E0104': ('Return outside function',
'return-outside-function',
'Used when a "return" statement is found outside a function or '
'method.'),
'E0105': ('Yield outside function',
'yield-outside-function',
'Used when a "yield" statement is found outside a function or '
'method.'),
'E0106': ('Return with argument inside generator',
'return-arg-in-generator',
'Used when a "return" statement with an argument is found '
                  'in a generator function or method (e.g. with some '
'"yield" statements).',
{'maxversion': (3, 3)}),
'E0107': ("Use of the non-existent %s operator",
'nonexistent-operator',
"Used when you attempt to use the C-style pre-increment or"
"pre-decrement operator -- and ++, which doesn't exist in Python."),
'E0108': ('Duplicate argument name %s in function definition',
'duplicate-argument-name',
'Duplicate argument names in function definitions are syntax'
' errors.'),
'E0110': ('Abstract class %r with abstract methods instantiated',
'abstract-class-instantiated',
'Used when an abstract class with `abc.ABCMeta` as metaclass '
'has abstract methods and is instantiated.'),
'W0120': ('Else clause on loop without a break statement',
'useless-else-on-loop',
'Loops should only have an else clause if they can exit early '
                  'with a break statement, otherwise the statements under else '
                  'should be in the same scope as the loop itself.'),
}
@check_messages('function-redefined')
def visit_class(self, node):
self._check_redefinition('class', node)
@check_messages('init-is-generator', 'return-in-init',
'function-redefined', 'return-arg-in-generator',
'duplicate-argument-name')
def visit_function(self, node):
if not redefined_by_decorator(node):
self._check_redefinition(node.is_method() and 'method' or 'function', node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(astroid.Return,
skip_klass=(astroid.Function, astroid.Class))
if node.is_method() and node.name == '__init__':
if node.is_generator():
self.add_message('init-is-generator', node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if [v for v in values
if not (v is None or
(isinstance(v, astroid.Const) and v.value is None) or
(isinstance(v, astroid.Name) and v.name == 'None')
)]:
self.add_message('return-in-init', node=node)
elif node.is_generator():
# make sure we don't mix non-None returns and yields
if not PY33:
for retnode in returns:
if isinstance(retnode.value, astroid.Const) and \
retnode.value.value is not None:
self.add_message('return-arg-in-generator', node=node,
line=retnode.fromlineno)
# Check for duplicate names
args = set()
for name in node.argnames():
if name in args:
self.add_message('duplicate-argument-name', node=node, args=(name,))
else:
args.add(name)
@check_messages('return-outside-function')
def visit_return(self, node):
if not isinstance(node.frame(), astroid.Function):
self.add_message('return-outside-function', node=node)
@check_messages('yield-outside-function')
def visit_yield(self, node):
if not isinstance(node.frame(), (astroid.Function, astroid.Lambda)):
self.add_message('yield-outside-function', node=node)
@check_messages('not-in-loop')
def visit_continue(self, node):
self._check_in_loop(node, 'continue')
@check_messages('not-in-loop')
def visit_break(self, node):
self._check_in_loop(node, 'break')
@check_messages('useless-else-on-loop')
def visit_for(self, node):
self._check_else_on_loop(node)
@check_messages('useless-else-on-loop')
def visit_while(self, node):
self._check_else_on_loop(node)
@check_messages('nonexistent-operator')
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if ((node.op in '+-') and
isinstance(node.operand, astroid.UnaryOp) and
(node.operand.op == node.op)):
self.add_message('nonexistent-operator', node=node, args=node.op*2)
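    # Illustration (added comment): Python parses the C-style `++x` as the
    # nested unary expression `+(+x)`, a silent no-op, which is exactly the
    # nesting the check above detects:
    #
    #     x = 1
    #     ++x    # parsed as +(+x) -> E0107 nonexistent-operator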
@check_messages('abstract-class-instantiated')
def visit_callfunc(self, node):
""" Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
infered = next(node.func.infer())
except astroid.InferenceError:
return
if not isinstance(infered, astroid.Class):
return
# __init__ was called
metaclass = infered.metaclass()
abstract_methods = _has_abstract_methods(infered)
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in infered.ancestors():
if ancestor.qname() == 'abc.ABC' and abstract_methods:
self.add_message('abstract-class-instantiated',
args=(infered.name, ),
node=node)
break
return
if metaclass.qname() == 'abc.ABCMeta' and abstract_methods:
self.add_message('abstract-class-instantiated',
args=(infered.name, ),
node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message('useless-else-on-loop', node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1)
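    # Illustration (added comment): W0120 fires on loops whose else clause
    # always runs because nothing in the body can break:
    #
    #     for x in xs:
    #         process(x)
    #     else:              # useless-else-on-loop: no break in the body
    #         cleanup()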
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
break
_node = _node.parent
else:
self.add_message('not-in-loop', node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
defined_self = node.parent.frame()[node.name]
if defined_self is not node and not are_exclusive(node, defined_self):
self.add_message('function-redefined', node=node,
args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = IAstroidChecker
name = 'basic'
msgs = {
'W0101': ('Unreachable code',
'unreachable',
'Used when there is some code behind a "return" or "raise" '
'statement, which will never be accessed.'),
'W0102': ('Dangerous default value %s as argument',
'dangerous-default-value',
                  'Used when a mutable value such as a list or dictionary is detected in '
'a default value for an argument.'),
'W0104': ('Statement seems to have no effect',
'pointless-statement',
                  'Used when a statement doesn\'t have (or at least seems not '
                  'to have) any effect.'),
'W0105': ('String statement has no effect',
'pointless-string-statement',
'Used when a string is used as a statement (which of course '
'has no effect). This is a particular case of W0104 with its '
'own message so you can easily disable it if you\'re using '
'those strings as documentation, instead of comments.'),
'W0106': ('Expression "%s" is assigned to nothing',
'expression-not-assigned',
'Used when an expression that is not a function call is assigned '
'to nothing. Probably something else was intended.'),
'W0108': ('Lambda may not be necessary',
'unnecessary-lambda',
'Used when the body of a lambda expression is a function call '
'on the same argument list as the lambda itself; such lambda '
'expressions are in all but a few cases replaceable with the '
'function being called in the body of the lambda.'),
'W0109': ("Duplicate key %r in dictionary",
'duplicate-key',
'Used when a dictionary expression binds the same key multiple '
'times.'),
'W0122': ('Use of exec',
'exec-used',
'Used when you use the "exec" statement (function for Python '
                  '3), to discourage its usage. That doesn\'t '
                  'mean you cannot use it!'),
'W0123': ('Use of eval',
'eval-used',
'Used when you use the "eval" function, to discourage its '
'usage. Consider using `ast.literal_eval` for safely evaluating '
'strings containing Python expressions '
'from untrusted sources. '),
'W0141': ('Used builtin function %r',
'bad-builtin',
                  'Used when a blacklisted builtin function is used (see the '
                  'bad-function option). Usual blacklisted functions are the ones '
                  'like map or filter, where Python now offers some cleaner '
                  'alternatives such as list comprehensions.'),
'W0142': ('Used * or ** magic',
'star-args',
'Used when a function or method is called using `*args` or '
'`**kwargs` to dispatch arguments. This doesn\'t improve '
'readability and should be used with care.'),
'W0150': ("%s statement in finally block may swallow exception",
'lost-exception',
'Used when a break or a return statement is found inside the '
'finally clause of a try...finally block: the exceptions raised '
'in the try clause will be silently swallowed instead of being '
're-raised.'),
        'W0199': ('Assert called on a 2-item tuple. Did you mean \'assert x,y\'?',
'assert-on-tuple',
'A call of assert on a tuple will always evaluate to true if '
'the tuple is not empty, and will always evaluate to false if '
'it is.'),
'C0121': ('Missing required attribute "%s"', # W0103
'missing-module-attribute',
'Used when an attribute required for modules is missing.'),
'E0109': ('Missing argument to reversed()',
'missing-reversed-argument',
'Used when reversed() builtin didn\'t receive an argument.'),
'E0111': ('The first reversed() argument is not a sequence',
'bad-reversed-sequence',
'Used when the first argument to reversed() builtin '
'isn\'t a sequence (does not implement __reversed__, '
                  'nor __getitem__ and __len__).'),
}
options = (('required-attributes',
{'default' : (), 'type' : 'csv',
'metavar' : '<attributes>',
'help' : 'Required attributes for module, separated by a '
'comma'}
),
('bad-functions',
{'default' : BAD_FUNCTIONS,
'type' :'csv', 'metavar' : '<builtin function names>',
                 'help' : 'List of builtin function names that should not be '
'used, separated by a comma'}
),
)
reports = (('RP0101', 'Statistics by type', report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics
"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0,
method=0, class_=0)
@check_messages('missing-module-attribute')
def visit_module(self, node):
"""check module name, docstring and required arguments
"""
self.stats['module'] += 1
for attr in self.config.required_attributes:
if attr not in node:
self.add_message('missing-module-attribute', node=node, args=attr)
def visit_class(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats['class'] += 1
@check_messages('pointless-statement', 'pointless-string-statement',
'expression-not-assigned')
def visit_discard(self, node):
"""check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value,
six.string_types):
            # treat string statement in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(scope, (astroid.Class, astroid.Module, astroid.Function)):
if isinstance(scope, astroid.Function) and scope.name != '__init__':
pass
else:
sibling = expr.previous_sibling()
if (sibling is not None and sibling.scope() is scope and
isinstance(sibling, astroid.Assign)):
return
self.add_message('pointless-string-statement', node=node)
return
        # ignore if this is:
        # * a direct function call
        # * the unique child of a try/except body
        # * a yield (which is wrapped by a discard node in _ast XXX)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (isinstance(expr, (astroid.Yield, astroid.CallFunc)) or
(isinstance(node.parent, astroid.TryExcept) and
node.parent.body == [node])):
return
if any(expr.nodes_of_class(astroid.CallFunc)):
self.add_message('expression-not-assigned', node=node,
args=expr.as_string())
else:
self.add_message('pointless-statement', node=node)
@check_messages('unnecessary-lambda')
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious
"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.CallFunc):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
# XXX are lambda still different with astroid >= 0.18 ?
# *args and **kwargs need to be treated specially, since they
# are structured differently between the lambda and the function
# call (in the lambda they appear in the args.args list and are
# indicated as * and ** by two bits in the lambda's flags, but
# in the function call they are omitted from the args list and
# are indicated by separate attributes on the function call node).
ordinary_args = list(node.args.args)
if node.args.kwarg:
if (not call.kwargs
or not isinstance(call.kwargs, astroid.Name)
or node.args.kwarg != call.kwargs.name):
return
elif call.kwargs:
return
if node.args.vararg:
if (not call.starargs
or not isinstance(call.starargs, astroid.Name)
or node.args.vararg != call.starargs.name):
return
elif call.starargs:
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(call.args):
return
for i in range(len(ordinary_args)):
if not isinstance(call.args[i], astroid.Name):
return
if node.args.args[i].name != call.args[i].name:
return
if (isinstance(node.body.func, astroid.Getattr) and
isinstance(node.body.func.expr, astroid.CallFunc)):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
self.add_message('unnecessary-lambda', line=node.fromlineno, node=node)
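    # Illustration (added comment): the typical W0108 case is a lambda that
    # merely forwards its argument list to another callable:
    #
    #     sorted(data, key=lambda x: str(x))   # unnecessary-lambda
    #     sorted(data, key=str)                # equivalent and clearer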
@check_messages('dangerous-default-value')
def visit_function(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats[node.is_method() and 'method' or 'function'] += 1
self._check_dangerous_default(node)
def _check_dangerous_default(self, node):
# check for dangerous default values as arguments
is_iterable = lambda n: isinstance(n, (astroid.List,
astroid.Set,
astroid.Dict))
for default in node.args.defaults:
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (isinstance(value, astroid.Instance) and
value.qname() in DEFAULT_ARGUMENT_SYMBOLS):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif type(value) is astroid.Instance or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.CallFunc):
msg = '%s() (%s)' % (value.name, value.qname())
else:
msg = '%s (%s)' % (default.as_string(), value.qname())
else:
# this argument is a name
msg = '%s (%s)' % (default.as_string(),
DEFAULT_ARGUMENT_SYMBOLS[value.qname()])
self.add_message('dangerous-default-value',
node=node,
args=(msg, ))
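    # Illustration (added comment): the classic W0102 case is a mutable
    # default, which is created once and shared between calls:
    #
    #     def append_to(item, target=[]):   # dangerous-default-value
    #         target.append(item)
    #         return target                 # keeps growing across calls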
@check_messages('unreachable', 'lost-exception')
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, 'return', (astroid.Function,))
@check_messages('unreachable')
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('unreachable', 'lost-exception')
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
# 2 - Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,))
@check_messages('unreachable')
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('exec-used')
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message('exec-used', node=node)
@check_messages('bad-builtin', 'star-args', 'eval-used',
'exec-used', 'missing-reversed-argument',
'bad-reversed-sequence')
def visit_callfunc(self, node):
"""visit a CallFunc node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
if isinstance(node.func, astroid.Name):
name = node.func.name
            # ignore the name if it's not a builtin (i.e. not defined in the
            # local or global scope)
if not (name in node.frame() or
name in node.root()):
if name == 'exec':
self.add_message('exec-used', node=node)
elif name == 'reversed':
self._check_reversed(node)
elif name == 'eval':
self.add_message('eval-used', node=node)
if name in self.config.bad_functions:
self.add_message('bad-builtin', node=node, args=name)
if node.starargs or node.kwargs:
scope = node.scope()
if isinstance(scope, astroid.Function):
toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
(node.kwargs, scope.args.kwarg)) if n]
if toprocess:
for cfnode, fargname in toprocess[:]:
if getattr(cfnode, 'name', None) == fargname:
toprocess.remove((cfnode, fargname))
if not toprocess:
return # star-args can be skipped
self.add_message('star-args', node=node.func)
@check_messages('assert-on-tuple')
def visit_assert(self, node):
"""check the use of an assert statement on a tuple."""
if node.fail is None and isinstance(node.test, astroid.Tuple) and \
len(node.test.elts) == 2:
self.add_message('assert-on-tuple', node=node)
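    # Illustration (added comment): a parenthesized assert with a message is
    # really an assert on a 2-element tuple, which is always true:
    #
    #     assert (x == y, 'x must equal y')   # assert-on-tuple, never fails
    #     assert x == y, 'x must equal y'     # what was probably intended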
@check_messages('duplicate-key')
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message('duplicate-key', node=node, args=key)
keys.add(key)
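    # Illustration (added comment): W0109 fires when a dict literal binds the
    # same key twice; only the last binding survives:
    #
    #     {'a': 1, 'b': 2, 'a': 3}   # duplicate-key 'a'; value ends up 3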
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message('unreachable', node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
        If, before reaching a try...finally block, we find a parent whose
        type is in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
        # the node could be a grand-grand-...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
self.add_message('lost-exception', node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
""" check that the argument to `reversed` is a sequence """
try:
argument = safe_infer(get_argument_from_call(node, position=0))
except NoSuchArgumentError:
self.add_message('missing-reversed-argument', node=node)
else:
if argument is astroid.YES:
return
if argument is None:
                # Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.CallFunc):
try:
func = next(node.args[0].func.infer())
except InferenceError:
return
if (getattr(func, 'name', None) == 'iter' and
is_builtin_object(func)):
self.add_message('bad-reversed-sequence', node=node)
return
if isinstance(argument, astroid.Instance):
if (argument._proxied.name == 'dict' and
is_builtin_object(argument._proxied)):
self.add_message('bad-reversed-sequence', node=node)
return
elif any(ancestor.name == 'dict' and is_builtin_object(ancestor)
for ancestor in argument._proxied.ancestors()):
# mappings aren't accepted by reversed()
self.add_message('bad-reversed-sequence', node=node)
return
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
# Check if it is a .deque. It doesn't seem that
# we can retrieve special methods
# from C implemented constructs.
if argument._proxied.qname().endswith(".deque"):
return
self.add_message('bad-reversed-sequence', node=node)
elif not isinstance(argument, (astroid.List, astroid.Tuple)):
# everything else is not a proper sequence for reversed()
self.add_message('bad-reversed-sequence', node=node)
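    # Illustration (added comment): reversed() wants a sequence, so mappings
    # and plain iterators are rejected by this check:
    #
    #     reversed([1, 2, 3])       # fine: lists are proper sequences
    #     reversed({'a': 1})        # bad-reversed-sequence: dict is a mapping
    #     reversed(iter([1, 2]))    # bad-reversed-sequence: iter() result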
_NAME_TYPES = {
'module': (MOD_NAME_RGX, 'module'),
'const': (CONST_NAME_RGX, 'constant'),
'class': (CLASS_NAME_RGX, 'class'),
'function': (DEFAULT_NAME_RGX, 'function'),
'method': (DEFAULT_NAME_RGX, 'method'),
'attr': (DEFAULT_NAME_RGX, 'attribute'),
'argument': (DEFAULT_NAME_RGX, 'argument'),
'variable': (DEFAULT_NAME_RGX, 'variable'),
'class_attribute': (CLASS_ATTRIBUTE_RGX, 'class attribute'),
'inlinevar': (COMP_VAR_RGX, 'inline iteration'),
}
def _create_naming_options():
name_options = []
for name_type, (rgx, human_readable_name) in six.iteritems(_NAME_TYPES):
name_type = name_type.replace('_', '-')
name_options.append((
'%s-rgx' % (name_type,),
{'default': rgx, 'type': 'regexp', 'metavar': '<regexp>',
'help': 'Regular expression matching correct %s names' % (human_readable_name,)}))
name_options.append((
'%s-name-hint' % (name_type,),
{'default': rgx.pattern, 'type': 'string', 'metavar': '<string>',
'help': 'Naming hint for %s names' % (human_readable_name,)}))
return tuple(name_options)
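# Illustration (added comment): for each entry of _NAME_TYPES this generates
# a pair of options, with underscores mapped to dashes; e.g. for
# 'class_attribute':
#
#     class-attribute-rgx        regexp option, default CLASS_ATTRIBUTE_RGX
#     class-attribute-name-hint  string option, default the regexp's pattern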
class NameChecker(_BasicChecker):
msgs = {
        'C0102': ('Blacklisted name "%s"',
                  'blacklisted-name',
                  'Used when the name is listed in the blacklist (unauthorized '
                  'names).'),
'C0103': ('Invalid %s name "%s"%s',
'invalid-name',
                  'Used when the name doesn\'t match the regular expression '
                  'associated with its type (constant, variable, class...).'),
}
options = (('good-names',
{'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Good variable names which should always be accepted,'
' separated by a comma'}
),
('bad-names',
{'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Bad variable names which should always be refused, '
'separated by a comma'}
),
('name-group',
{'default' : (),
'type' :'csv', 'metavar' : '<name1:name2>',
'help' : ('Colon-delimited sets of names that determine each'
' other\'s naming style when the name regexes'
' allow several styles.')}
),
('include-naming-hint',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help': 'Include a hint for the correct naming format with invalid-name'}
),
) + _create_naming_options()
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
def open(self):
self.stats = self.linter.add_stats(badname_module=0,
badname_class=0, badname_function=0,
badname_method=0, badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0)
for group in self.config.name_group:
for name_type in group.split(':'):
self._name_group[name_type] = 'group_%s' % (group,)
@check_messages('blacklisted-name', 'invalid-name')
def visit_module(self, node):
self._check_name('module', node.name.split('.')[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in six.itervalues(self._bad_names):
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in six.itervalues(all_groups):
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group))
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@check_messages('blacklisted-name', 'invalid-name')
def visit_class(self, node):
self._check_name('class', node.name, node)
for attr, anodes in six.iteritems(node.instance_attrs):
if not list(node.instance_attr_ancestors(attr)):
self._check_name('attr', attr, anodes[0])
@check_messages('blacklisted-name', 'invalid-name')
def visit_function(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
confidence = HIGH
if node.is_method():
if overrides_a_method(node.parent.frame(), node.name):
return
confidence = (INFERENCE if has_known_bases(node.parent.frame())
else INFERENCE_FAILURE)
self._check_name(_determine_function_name_type(node),
node.name, node, confidence)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
@check_messages('blacklisted-name', 'invalid-name')
def visit_global(self, node):
for name in node.names:
self._check_name('const', name, node)
@check_messages('blacklisted-name', 'invalid-name')
def visit_assname(self, node):
"""check module level assigned names"""
frame = node.frame()
ass_type = node.ass_type()
if isinstance(ass_type, astroid.Comprehension):
self._check_name('inlinevar', node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(ass_type, astroid.Assign) and not in_loop(ass_type):
if isinstance(safe_infer(ass_type.value), astroid.Class):
self._check_name('class', node.name, node)
else:
if not _redefines_import(node):
# Don't emit if the name redefines an import
# in an ImportError except handler.
self._check_name('const', node.name, node)
elif isinstance(ass_type, astroid.ExceptHandler):
self._check_name('variable', node.name, node)
elif isinstance(frame, astroid.Function):
            # variables introduced via 'global' aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name('variable', node.name, node)
elif isinstance(frame, astroid.Class):
if not list(frame.local_attr_ancestors(node.name)):
self._check_name('class_attribute', node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssName):
self._check_name('argument', arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(self, node, node_type, name, confidence):
type_label = _NAME_TYPES[node_type][1]
hint = ''
if self.config.include_naming_hint:
hint = ' (hint: %s)' % (getattr(self.config, node_type + '_name_hint'))
self.add_message('invalid-name', node=node, args=(type_label, name, hint),
confidence=confidence)
self.stats['badname_' + node_type] += 1
def _check_name(self, node_type, name, node, confidence=HIGH):
"""check for a name using the type's regexp"""
if is_inside_except(node):
clobbering, _ = clobber_in_except(node)
if clobbering:
return
if name in self.config.good_names:
return
if name in self.config.bad_names:
self.stats['badname_' + node_type] += 1
self.add_message('blacklisted-name', node=node, args=name)
return
regexp = getattr(self.config, node_type + '_rgx')
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None:
self._raise_name_warning(node, node_type, name, confidence)
class DocStringChecker(_BasicChecker):
msgs = {
'C0111': ('Missing %s docstring', # W0131
'missing-docstring',
                  'Used when a module, function, class or method has no docstring. '
                  'Some special methods like __init__ don\'t necessarily require a '
                  'docstring.'),
'C0112': ('Empty %s docstring', # W0132
'empty-docstring',
'Used when a module, function, class or method has an empty '
'docstring (it would be too easy ;).'),
}
options = (('no-docstring-rgx',
{'default' : NO_REQUIRED_DOC_RGX,
'type' : 'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match '
'function or class names that do not require a '
'docstring.'}
),
('docstring-min-length',
{'default' : -1,
'type' : 'int', 'metavar' : '<int>',
'help': ('Minimum line length for functions/classes that'
' require docstrings, shorter ones are exempt.')}
),
)
def open(self):
self.stats = self.linter.add_stats(undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0)
@check_messages('missing-docstring', 'empty-docstring')
def visit_module(self, node):
self._check_docstring('module', node)
@check_messages('missing-docstring', 'empty-docstring')
def visit_class(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring('class', node)
@check_messages('missing-docstring', 'empty-docstring')
def visit_function(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = node.is_method() and 'method' or 'function'
if isinstance(node.parent.frame(), astroid.Class):
overridden = False
confidence = (INFERENCE if has_known_bases(node.parent.frame())
else INFERENCE_FAILURE)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and \
isinstance(ancestor[node.name], astroid.Function):
overridden = True
break
self._check_docstring(ftype, node,
report_missing=not overridden,
confidence=confidence)
else:
self._check_docstring(ftype, node)
def _check_docstring(self, node_type, node, report_missing=True,
confidence=HIGH):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
if not report_missing:
return
if node.body:
lines = node.body[-1].lineno - node.body[0].lineno + 1
else:
lines = 0
if node_type == 'module' and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != 'module' and max_lines > -1 and lines < max_lines:
return
self.stats['undocumented_'+node_type] += 1
if (node.body and isinstance(node.body[0], astroid.Discard) and
isinstance(node.body[0].value, astroid.CallFunc)):
# Most likely a string with a format call. Let's see.
func = safe_infer(node.body[0].value.func)
if (isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)):
# Strings in Python 3, others in Python 2.
if PY3K and func.bound.name == 'str':
return
elif func.bound.name in ('str', 'unicode', 'bytes'):
return
self.add_message('missing-docstring', node=node, args=(node_type,),
confidence=confidence)
elif not docstring.strip():
self.stats['undocumented_'+node_type] += 1
self.add_message('empty-docstring', node=node, args=(node_type,),
confidence=confidence)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {'W0107': ('Unnecessary pass statement',
'unnecessary-pass',
'Used when a "pass" statement that can be avoided is '
'encountered.'),
}
@check_messages('unnecessary-pass')
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1:
self.add_message('unnecessary-pass', node=node)
class LambdaForComprehensionChecker(_BasicChecker):
"""check for using a lambda where a comprehension would do.
See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196>
where GvR says comprehensions would be clearer.
"""
msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension',
'deprecated-lambda',
'Used when a lambda is the first argument to "map" or '
'"filter". It could be clearer as a list '
'comprehension or generator expression.',
{'maxversion': (3, 0)}),
}
@check_messages('deprecated-lambda')
def visit_callfunc(self, node):
"""visit a CallFunc node, check if map or filter are called with a
lambda
"""
if not node.args:
return
if not isinstance(node.args[0], astroid.Lambda):
return
infered = safe_infer(node.func)
if (is_builtin_object(infered)
and infered.name in ['map', 'filter']):
self.add_message('deprecated-lambda', node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(LambdaForComprehensionChecker(linter))
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/pylint/checkers/base.py
|
Python
|
agpl-3.0
| 55,089
|
[
"VisIt"
] |
554b789342dc7dc66076c49c54cceef0b062b1ac43bb1a583f1403a22d491f83
|
# -*- coding: utf-8 -*-
#
# RDKit documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 7 18:51:45 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('exts'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest','extapi']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'The RDKit'
copyright = u'2013, Greg Landrum'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2013.06'
# The full version, including alpha/beta/rc tags.
release = '2013.06.1pre'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../Images/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**':['globaltoc.html','relations.html','sourcelink.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'RDKitdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RDKit.tex', u'RDKit Documentation',
u'Greg Landrum', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rdkit', u'RDKit Documentation',
[u'Greg Landrum'], 1)
]
|
rdkit/rdkit-orig
|
Docs/Book/conf.py
|
Python
|
bsd-3-clause
| 7,103
|
[
"RDKit"
] |
48a4fc4e76e4844452bd6ab3801eab47ef3ecf47d1ecc7c97c7e78c339017365
|
import os
from glob import glob
from collections import Counter
import numpy as np
from json import dump, load
from traitlets import TraitError
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.gridspec as gridspec
from traitlets import validate
from ipywidgets import IntRangeSlider
from .core import SfgRecord, concatenate_list_of_SfgRecords
from sfg2d.utils.config import CONFIG
X_PIXEL_INDEX = CONFIG['X_PIXEL_INDEX']
Y_PIXEL_INDEX = CONFIG['Y_PIXEL_INDEX']
SPEC_INDEX = CONFIG['SPEC_INDEX']
FRAME_AXIS_INDEX = CONFIG['FRAME_AXIS_INDEX']
PP_INDEX = CONFIG['PP_INDEX']
PIXEL = CONFIG['PIXEL']
debug = 0
class WidgetBase():
"""A Base class for my widgets.
Uses SfgRecord object as data container.
Consists out of several ipywidgets.
Plots are rendered using matplotlib.
Define any ipwidget you need within this class, within the
*WidgetBase._init_widget* function. Default or context
dependet options of the widgets can be set during the
*WidgetBase._conf_widget_with_data* function.
The observers of the widgets are set within the *_init_observer*
function, or if it is an figure updating widget within the
*_init_figure_observers* function.
If an observer is defined, also define an unobserver in the
*_unobserver* function.
"""
def __init__(self, data=SfgRecord(), fig=None, ax=None,
central_wl=674, vis_wl=800, figsize=None):
# SfgRecord obj holding the data.
self.data = data
# 4 dim numpy array representing the baseline
# Internal objects
# Figure to draw on
self._fig = fig
# Central wavelength of the camera
# Size of the figure
self._figsize = figsize
# List of widgets that update the figure
self._figure_widgets = []
# Buffer to save autoscale values with.
self._autoscale_buffer = [None, None]
self._autoscale_buffer_2 = [None, None]
# Buffer to save x_rois upon switching data.
self._rois_x_pixel_buffer = [slice(None, None)]
        # Buffer unpumped and pumped indices across switching data files
self._unpumped_index_buffer = 0
self._pumped_index_buffer = 1
# List of widgets to display
self.children = []
# Setup all widgets
self._init_widget()
def __call__(self):
"""Use call to actually Render the widgets on the notebook."""
from IPython.display import display
self._conf_widget_with_data()
self._init_observer()
self._init_figure_observers()
self._update_figure()
display(self.children)
self.fig
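    # Usage sketch (added comment; assumes a notebook with matplotlib enabled
    # and an SfgRecord constructible from a data file -- the file name below
    # is purely illustrative):
    #
    #     record = SfgRecord('some_measurement_file')
    #     widget = WidgetBase(data=record)
    #     widget()   # renders the ipywidgets and draws the initial figure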
def _init_widget(self):
"""Init all widgets.
Add widgets within this function. If possible you can give default
properties to the widgets already within this function. Also use
this function to combine many widgets into e.g. boxes
"""
import ipywidgets as wi
# ## Any widget that we need at some point can be added here.
# Widget to enter a folder path as string
self.wTextFolder = wi.Text(
layout=wi.Layout(width='90%'),
)
# Selection dialogue to select data from a list of files
self.wSelectFile = wi.SelectMultiple(
layout=wi.Layout(width='41%'),
)
# Checkbox to toggle the visibility of the baseline data
self.wCheckShowBase = wi.Checkbox(
description='Baseline',
value=False,
)
        # Checkbox to toggle the visibility of the norm
self.wCheckShowNorm = wi.Checkbox(
description='Norm',
value=False
)
# Checkbox to toggle visibility of bleach
self.wCheckShowBleach = wi.Checkbox(
description='Bleach',
value=False,
)
# Dropdown to Choose Type of Bleach
self.wDropdownBleachOpt = wi.Dropdown(
description='Opt:',
options=['rel', 'abs'],
value='rel',
)
# Dropdown to Choose property of bleach
self.wDropdownBleachProp = wi.Dropdown(
description='Prop:',
options=['rawData', 'basesubed', 'normalized'],
value='basesubed',
)
# Toggle to show Trace of Bleach
self.wCheckShowTracesBleach = wi.Checkbox(
description='Bleach',
value=False,
)
self.wCheckShowTracesRawData = wi.Checkbox(
description='Raw',
value=True,
)
self.wCheckShowTracesBasesubed = wi.Checkbox(
description='Basesubed',
value=False,
)
self.wCheckShowTracesNormalized = wi.Checkbox(
description='Normalized',
value=False,
)
# Region slice to select index for zero_time_subtraction
self.wRangeZeroTime = IntRangeSliderGap(
description="Zero Time",
value=(0, 1), continuous_update=False,
)
# Snap pixel roi.
self.wSnapXRoi = wi.Button(
description="Snap X Region"
)
        # Checkbox to toggle the zero_time subtraction of bleach data
self.wCheckShowZeroTimeSubtraction = wi.Checkbox(
description='Sub Zero Time',
value=False,
)
# Slider to select the width of the smoothing kernel
self.wIntSliderSmooth = wi.IntSlider(
continuous_update=False, description="Smooth",
min=1, max=19, step=2,
)
# Slider to select smoothing of baseline
# TODO Is not used yet
self.wIntSliderSmoothBase = wi.IntSlider(
continuous_update=False, description="Smooth",
min=self.wIntSliderSmooth.min,
max=self.wIntSliderSmooth.max,
step=2, value=1,
)
# Checkbox to toggle the Autoscale functionality of matplotlib
self.wCheckAutoscale = wi.Checkbox(
description="Autoscale",
value=True,
)
# Checkbox to toggle the Autoscale functionality of matplotlib
self.wCheckAutoscaleTrace = wi.Checkbox(
description="Autoscale Trace",
value=True,
)
# Slider to select the visible y-pixel/spectra range
self.wRangeSliderPixelY = IntRangeSliderGap(
continuous_update=False, description="Spectra Region"
)
# Slider to select spectra step size.
self.wIntTextPixelYStep = wi.BoundedIntText(
description='Spectra Stepsize', value=1, min=1,
layout=wi.Layout(width='180px',),
)
self.wCheckSpectraMean = wi.Checkbox(
description='Spectra Mean',
value=False
)
self.wDropdownSpectraMode = wi.Dropdown(
description='Spectra Mode',
options=['Index', 'Region'],
value='Region',
layout=wi.Layout(width='180px',),
)
# Slider to select the overall visible x-pixel range
self.wRangeSliderPixelX = IntRangeSliderGap(
continuous_update=False, description="X Region",
max=PIXEL, value=(0, PIXEL),
)
# Slider to select the x-pixel range used within traces
self.wRangeSliderTracePixelX = IntRangeSliderGap(
continuous_update=False, description="Trace Region",
max=PIXEL, value=(int(PIXEL*0.40), int(PIXEL*0.6)),
)
# Textbox to enter central wavelength of the camera in nm
self.wTextCentralWl = wi.FloatText(
description='Central Wl', value=self.data.central_wl,
layout=wi.Layout(
width='180px',
),
)
# Dropdown menu to select x-axis calibration.
self.wDropdownCalib = wi.Dropdown(
description='x-axis', options=['pixel', 'wavelength', 'wavenumber'],
layout=self.wTextCentralWl.layout,
)
# Textbox to enter the wavelength of the upconversion photon
# in nm.
self.wTextVisWl = wi.FloatText(
description='Vis Wl', value=self.data.vis_wl,
layout=self.wTextCentralWl.layout
)
# Slider to select visible pp-delay spectrum
self.wSliderPPDelay = wi.IntSlider(
description="Delay Index", continuous_update=False,
)
self.wRangeSliderPPDelay = IntRangeSliderGap(
continuous_update=False, description="Delay Region",
)
self.wCheckDelayMedian = wi.Checkbox(
description='Delay Median', value=False, disabled=False
)
        # Dropdown to choose how Baseline or IR data gets sent.
self.wDropdownDelayMode = wi.Dropdown(
description="Delay Mode", value="Index",
options=["Index", "Region"],
layout=wi.Layout(width='180px',)
)
# Slider to select range of frames used for median calculation.
self.wSliderFrame = wi.IntSlider(
description='Frame Index', continuous_update=False
)
self.wRangeSliderFrame = IntRangeSliderGap(
continuous_update=False, description="Frame Region"
)
# Checkbox to toggle the frame wise calculation of a median spectrum.
self.wCheckFrameMedian = wi.Checkbox(
description='Frame Median',
)
        # Dropdown to choose how Baseline and IR data get sent
self.wDropdownFrameMode = wi.Dropdown(
description="Frame Mode", value="Index",
options=["Index", "Region"],
layout=wi.Layout(width='180px',)
)
# Slider to select frames for median calculation.
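        # (Note, added comment: this re-assigns self.wSliderFrame created above.)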
self.wSliderFrame = wi.IntSlider(
description='Frame', continuous_update=False
)
# Textbox to enter an additional constant offset to the baseline.
self.wTextBaselineOffset = wi.FloatText(
description='Offset', value=0,
layout=wi.Layout(width='180px'),
)
# Textbox to enter the index of the pumped spectrum.
self.wIntTextPumped = wi.BoundedIntText(
value=0,
min=0,
max=400, # Number of spectra/ypixels
description='Pumped',
layout=wi.Layout(width='180px'),
)
# Textbox to enter the index of the unpumped spectrum.
self.wIntTextUnpumped = wi.BoundedIntText(
value=1,
min=0,
max=400,
description='Unpumped',
layout=self.wIntTextPumped.layout,
)
# Checkbox to toggle visibility of Raw Spectra.
self.wCheckShowRawData = wi.Checkbox(
description='RawData',
value=True,
)
# Checkbox to toggle visibility of Basesubed.
self.wCheckShowBasesubed = wi.Checkbox(
description='Basesubed',
value=False,
)
# Checkbox to toggle visibility of Normalized.
self.wCheckShowNormalized = wi.Checkbox(
description='Normalized',
value=False,
)
# Dropdown to toggle view of the summed spectra
self.wDropShowTrace = wi.Dropdown(
options=["Raw", "Normalized", "Bleach"],
description='Trace',
value="Raw",
layout=self.wTextCentralWl.layout,
)
self.wTextSaveRecord = wi.Text(
description="File"
)
self.wButtonSaveRecord = wi.Button(
description="Save Record"
)
        # ### Aligning boxes ###
self._data_box = wi.VBox([
wi.HBox([
wi.Label("Folder"),
self.wTextFolder,
]),
wi.HBox([
wi.Label('File'),
self.wSelectFile,
]),
])
self._signal_box = wi.VBox([
wi.HBox([
self.wSliderPPDelay,
self.wCheckDelayMedian,
self.wRangeSliderPPDelay,
self.wDropdownDelayMode,
]),
wi.HBox([
self.wSliderFrame,
self.wCheckFrameMedian,
self.wRangeSliderFrame,
self.wDropdownFrameMode,
]),
wi.HBox([
self.wRangeSliderPixelY,
self.wCheckSpectraMean,
self.wIntTextPixelYStep,
self.wRangeSliderPixelX,
]),
wi.HBox([
self.wIntSliderSmooth,
self.wRangeSliderTracePixelX,
])
])
self._calib_box = wi.HBox([
self.wDropdownCalib,
self.wTextCentralWl,
self.wTextVisWl,
self.wCheckAutoscale,
self.wCheckAutoscaleTrace,
])
self._save_record_box = wi.HBox([
self.wTextSaveRecord,
self.wButtonSaveRecord,
])
# List of widgets that update the figure on value change
self._figure_widgets = [
self.wSelectFile,
self.wSliderPPDelay,
self.wRangeSliderPPDelay,
self.wSliderFrame,
self.wCheckDelayMedian,
self.wRangeSliderFrame,
self.wRangeSliderPixelY,
self.wIntTextPixelYStep,
self.wRangeSliderPixelX,
self.wRangeSliderTracePixelX,
self.wCheckFrameMedian,
self.wTextVisWl,
self.wTextCentralWl,
self.wCheckAutoscale,
self.wDropdownCalib,
self.wCheckAutoscaleTrace,
self.wCheckShowNorm,
self.wIntSliderSmooth,
self.wCheckShowBase,
self.wCheckShowBleach,
self.wDropdownBleachOpt,
self.wDropdownBleachProp,
self.wCheckShowRawData,
self.wCheckShowBasesubed,
self.wCheckShowNormalized,
self.wCheckShowTracesBleach,
self.wCheckShowTracesRawData,
self.wCheckShowTracesBasesubed,
self.wCheckShowTracesNormalized,
self.wCheckSpectraMean,
self.wDropdownSpectraMode,
self.wDropdownDelayMode,
self.wDropdownFrameMode,
self.wDropShowTrace,
self.wTextBaselineOffset,
self.wIntTextPumped,
self.wIntTextUnpumped,
self.wCheckShowZeroTimeSubtraction,
self.wRangeZeroTime,
]
# Upon saving the gui state these widgets get saved
self._save_widgets = {
'folder': self.wTextFolder,
'file': self.wSelectFile,
'showBaseline': self.wCheckShowBase,
'checkDelayMedian': self.wCheckDelayMedian,
'showBleach': self.wCheckShowBleach,
'bleachOpt': self.wDropdownBleachOpt,
'bleachProp': self.wDropdownBleachProp,
'showTracesBleach': self.wCheckShowTracesBleach,
'showTracesRawData': self.wCheckShowTracesRawData,
'showTracesBasesubed': self.wCheckShowTracesBasesubed,
'showTracesNormalized': self.wCheckShowTracesNormalized,
'delayMode': self.wDropdownDelayMode,
'frameMode': self.wDropdownFrameMode,
'smoothSlider': self.wIntSliderSmooth,
'smoothBase': self.wIntSliderSmoothBase,
'autoscale': self.wCheckAutoscale,
'autoscaleTrace': self.wCheckAutoscaleTrace,
'pixelY': self.wRangeSliderPixelY,
'pixelY_step': self.wIntTextPixelYStep,
'pixelXpixel': self.wRangeSliderPixelX,
'tracePixelX': self.wRangeSliderTracePixelX,
'centralWl': self.wTextCentralWl,
'calib': self.wDropdownCalib,
'visWl': self.wTextVisWl,
'showNorm': self.wCheckShowNorm,
'pp_delay_slice': self.wRangeSliderPPDelay,
'frame_region': self.wRangeSliderFrame,
'frame_index': self.wSliderFrame,
'frameMedian': self.wCheckFrameMedian,
'frame': self.wSliderFrame,
'baselineOffset': self.wTextBaselineOffset,
'pumped': self.wIntTextPumped,
'unpumped': self.wIntTextUnpumped,
'bleachZeroTimeSubtraction': self.wCheckShowZeroTimeSubtraction,
'showTrace': self.wDropShowTrace,
'spectraMean': self.wCheckSpectraMean,
'spectraMode': self.wDropdownSpectraMode,
'showRawData': self.wCheckShowRawData,
'showBasesubed': self.wCheckShowBasesubed,
'showNormalized': self.wCheckShowNormalized,
'zeroTimeSelec': self.wRangeZeroTime,
'saveRecord': self.wTextSaveRecord,
}
def _conf_widget_with_data(self):
"""Set all widget options and default values according to data.
        This uses the data to set the state of the widget. Thus one usually
        calls it after loading new data. During normal operation of the
        widget this is not called, because then the widget updates the data.
"""
def _set_range_slider_options(slider, record_data_index):
"""Set options of a gaped range slider.
slider: The slider to set the options of,
record_data_index: Index position of the property to set.
"""
slider.max = self.data.rawData.shape[record_data_index]
if self.data.rawData.shape[record_data_index] == 1:
slider.disabled = True
else:
slider.disabled = False
if np.any(np.array(slider.value) >=
slider.max):
slider.value = (0, slider.max)
def _set_int_slider_options(slider, record_data_index):
"""Set options of a slider.
slider: The slider to set the options of,
record_data_index: Index position of the property to set.
"""
slider.max = self.data.rawData.shape[
record_data_index
] - 1
            if slider.value > slider.max:
                slider.value = slider.max
if slider.max == 1:
slider.disabled = True
else:
slider.disabled = False
# TODO Maybe I should split this up in conf_widget_options
# and conf_widget_values.
_set_range_slider_options(self.wRangeSliderPPDelay, PP_INDEX)
_set_range_slider_options(self.wRangeSliderFrame, FRAME_AXIS_INDEX)
_set_range_slider_options(self.wRangeSliderPixelY, Y_PIXEL_INDEX)
self.wIntTextPixelYStep.max = self.wRangeSliderPixelY.max
if self.wIntTextPixelYStep.value > self.wIntTextPixelYStep.max:
self.wIntTextPixelYStep.value = self.wIntTextPixelYStep.max
_set_range_slider_options(self.wRangeSliderTracePixelX, X_PIXEL_INDEX)
_set_int_slider_options(self.wSliderPPDelay, PP_INDEX)
_set_int_slider_options(self.wSliderFrame, FRAME_AXIS_INDEX)
_set_range_slider_options(self.wRangeZeroTime, PP_INDEX)
self.wTextCentralWl.value = self.data.central_wl
self.wTextVisWl.value = self.data.vis_wl
# Currently not used.
self.wSliderFrame.max = self.data.base.shape[
FRAME_AXIS_INDEX
] - 1
if self.wSliderFrame.max == 1:
self.wSliderFrame.disabled = True
else:
self.wSliderFrame.disabled = False
if self.wSliderFrame.value > self.wSliderFrame.max:
self.wSliderFrame.value = self.wSliderFrame.max
self.wIntTextPumped.max = self.data.rawData.shape[Y_PIXEL_INDEX] - 1
self.wIntTextUnpumped.max = self.wIntTextPumped.max
self.wIntTextUnpumped.value = self.data.unpumped_index
self.wIntTextPumped.value = self.data.pumped_index
if self.wIntTextPumped.value == self.wIntTextUnpumped.value:
self.wIntTextUnpumped.value += 1
self.wTextBaselineOffset.value = self.data.baseline_offset
def _init_figure_observers(self):
"""All observers that call the *update_figure_callback* """
# Because during widget runtime it can be necessary to stop
# and restart the automatic figure updating to prevent flickering
# and to speed up the gui. There is a special function to
# set up the observers and also to remove the observers in the
# figures_widgets list.
for widget in self._figure_widgets:
widget.observe(self._update_figure_callback, "value")
def _unobserve_figure(self):
"""Unobserver figure observers."""
for widget in self._figure_widgets:
try:
widget.unobserve(self._update_figure_callback, 'value')
except ValueError:
if debug:
                    print("Can't unobserve {}; description is {}".format(
widget, widget.description
))
def _init_observer(self):
"""Set all observer of all subwidgets."""
# This registers the callback functions to the gui elements.
# After a call of _init_observer, the gui elements start to
# actually do something, namely what ever is defined within the
# callback function of the observer.
self.wTextFolder.on_submit(self._on_folder_submit)
self.wSelectFile.observe(self._load_data, 'value')
self.wDropdownCalib.observe(self._on_calib_changed, "value")
self.wTextCentralWl.observe(self.x_spec_renew, "value")
self.wTextVisWl.observe(self.x_spec_renew, "value")
self.wIntTextPumped.observe(self._on_pumped_index_changed, "value")
self.wIntTextUnpumped.observe(self._on_unpumped_index_changed, "value")
self.wCheckDelayMedian.observe(self._on_delay_median_clicked, "value")
self.wDropdownDelayMode.observe(self._on_delay_mode_changed, "value")
self.wCheckFrameMedian.observe(self._on_frame_median_clicked, "value")
self.wDropdownFrameMode.observe(self._on_frame_mode_changed, "value")
self.wRangeSliderPixelX.observe(self._set_roi_x_pixel_spec, "value")
self.wRangeSliderTracePixelX.observe(self._set_roi_trace_x_pixel,
"value")
self.wRangeSliderFrame.observe(self._set_roi_frames,
"value")
self.wRangeSliderPixelY.observe(self._set_roi_spectra,
"value")
self.wRangeSliderPPDelay.observe(self._set_roi_delays,
"value")
self.wButtonSaveRecord.on_click(self._on_save_record)
self.wCheckShowZeroTimeSubtraction.observe(
self._set_zero_time_subtraction, "value"
)
self.wTextBaselineOffset.observe(
self._on_baseline_offset_changed, "value"
)
self.wRangeZeroTime.observe(self._set_zero_time_selec, "value")
self.wSnapXRoi.on_click(self._snap_x_roi)
#self._init_figure_observers()
def _on_folder_submit(self, new=None):
"""Called when folder is changed."""
if not os.path.isdir(self.wTextFolder.value):
print('Warning folder {} not found'.format(self.wTextFolder.value))
return
if debug:
print("_on_folder_submit_called")
if debug > 1:
print("fnames:", self.fnames)
        # The *with* is a workaround. I need it in the test functions,
        # not the gui. Anyway, it doesn't quite work.
with self.wSelectFile.hold_trait_notifications():
self.wSelectFile.options = self.fnames
def _load_data(self, new=None):
"""Update the internal data objects.
        Loads data from hdd, and sets data properties according to gui settings.
        Afterwards, the gui settings are configured against the data again to
        ensure consistency.
        Scheme:
        load data ---> update data ---> configure widget options and values
"""
if len(self.wSelectFile.value) == 0:
return
elif len(self.wSelectFile.value) == 1:
self.data = SfgRecord(
self.folder + "/" + self.wSelectFile.value[0]
)
else:
records = [SfgRecord(self.folder + "/" + fname)
for fname in self.wSelectFile.value]
self.data = concatenate_list_of_SfgRecords(records)
self._unobserve_figure()
self._set_zero_time_subtraction(None)
self._set_roi_trace_x_pixel()
self._set_roi_frames()
self._set_roi_spectra()
self._set_roi_delays()
self._set_pumped_index()
        # Deactivating the observers above prevents flickering
        # and unneeded calls of _update_figure. Thus we call
        # _update_figure manually after re-registering the observers.
self._conf_widget_with_data()
self._init_figure_observers()
self._update_figure()
#print("keep figures unobserved: ", keep_figure_unobserved)
def _set_roi_x_pixel_spec(self, new=None):
self.data.roi_x_pixel_spec = self.wRangeSliderPixelX.slice
def _set_roi_trace_x_pixel(self, new=None):
self._rois_x_pixel_buffer[0] = slice(*self.wRangeSliderTracePixelX.value)
self.data.rois_x_pixel_trace = self._rois_x_pixel_buffer
def _set_roi_frames(self, new=None):
self.data.roi_frames = slice(*self.wRangeSliderFrame.value)
def _set_roi_spectra(self, new=None):
self.data.roi_spectra = self.spec_slice
def _set_roi_delays(self, new=None):
self.data.roi_delay = self.wRangeSliderPPDelay.slice
def _set_pumped_index(self, new=None):
self.data.unpumped_index = self._unpumped_index_buffer
self.data.pumped_index = self._pumped_index_buffer
def x_spec_renew(self, new={}):
"""Renew calibration according to gui."""
self.data.central_wl = self.wTextCentralWl.value
self.data.vis_wl = self.wTextVisWl.value
def _on_delay_median_clicked(self, new=None):
if self.wCheckDelayMedian.value:
self.wDropdownDelayMode.value = "Region"
def _on_frame_median_clicked(self, new=None):
if self.wCheckFrameMedian.value:
self.wDropdownFrameMode.value = "Region"
self.wSliderFrame.disabled = True
else:
self.wSliderFrame.disabled = False
def _on_frame_mode_changed(self, new=None):
if self.wDropdownFrameMode.value == "Index":
self.wCheckFrameMedian.value = False
def _on_delay_mode_changed(self, new=None):
if self.wDropdownDelayMode.value == "Region":
self.wSliderPPDelay.disabled = True
else:
self.wSliderPPDelay.disabled = False
def _on_calib_changed(self, new=None):
"""Calibration changed."""
self.x_spec_renew()
self.wCheckAutoscale.value = True
def _on_pumped_index_changed(self, new=None):
"""Reset Bleach related properties."""
self._pumped_index_buffer = self.wIntTextPumped.value
self.data.pumped_index = self.wIntTextPumped.value
def _on_unpumped_index_changed(self, new=None):
self._unpumped_index_buffer = self.wIntTextUnpumped.value
self.data.unpumped_index = self.wIntTextUnpumped.value
def _set_zero_time_subtraction(self, new=None):
self.data.zero_time_subtraction = \
self.wCheckShowZeroTimeSubtraction.value
def _set_zero_time_selec(self, new=None):
self.data.zero_time_selec = self.wRangeZeroTime.slice
def _on_baseline_offset_changed(self, new=None):
self.data.baseline_offset = self.wTextBaselineOffset.value
def _on_save_record(self, new=None):
fname = self.folder + '/' + self.wTextSaveRecord.value
data = self.data.keep_frames()
data.save(fname)
def _snap_x_roi(self, new=None):
self.data.rois_x_pixel_trace.append(self.wRangeSliderTracePixelX.slice)
        # We want to be able to save the snaps throughout different data sets.
self._rois_x_pixel_buffer = self.data.rois_x_pixel_trace
self._update_figure()
@property
def folder(self):
return os.path.abspath(self.wTextFolder.value)
@property
def fnames(self):
return _filter_fnames(self.wTextFolder.value)
@property
def pp_delay_slice(self):
"""PP Delay index Slice"""
return self.wRangeSliderPPDelay.slice
@property
def pp_delay_selected(self):
if self.wDropdownDelayMode.value == "Index":
return _slider_int_to_slice(self.wSliderPPDelay)
return self.wRangeSliderPPDelay.slice
@property
def frame_selected(self):
"""Gui selected frame slice."""
if self.wDropdownFrameMode.value == "Index":
return _slider_int_to_slice(self.wSliderFrame)
return self.wRangeSliderFrame.slice
@property
def spec_slice(self):
"""Specta slice/Y-Pixel slice."""
sl = self.wRangeSliderPixelY.slice
ret = slice(sl.start, sl.stop, self.wIntTextPixelYStep.value)
return ret
@property
def x_pixel_slice(self):
return self.wRangeSliderPixelX.slice
@property
def x_trace_pixel_slice(self):
"""X Pixel slice."""
return self.wRangeSliderTracePixelX.slice
@property
def x_spec(self):
"""X data of the *Signal* plot. """
if self.wDropdownCalib.value == 'pixel':
x = self.data.pixel
elif self.wDropdownCalib.value == 'wavelength':
x = self.data.wavelength
elif self.wDropdownCalib.value == 'wavenumber':
x = self.data.wavenumber
return x
def spectra(self, prop, kwargs_prop={}):
"""Use settings of the gui to select spectra data from SfgRecord."""
kwargs = dict(
prop=prop,
kwargs_prop=kwargs_prop,
roi_delay=self.pp_delay_selected,
roi_frames=self.frame_selected,
roi_spectra=self.spec_slice,
roi_pixel=self.x_pixel_slice,
frame_med=self.wCheckFrameMedian.value,
delay_mean=self.wCheckDelayMedian.value,
spectra_mean=self.wCheckSpectraMean.value,
medfilt_pixel=self.wIntSliderSmooth.value,
)
return self.data.select(**kwargs)
def trace(self, prop, kwargs_prop={}):
"""Use settings of gui to susbelect data for trace."""
kwargs = dict(
prop=prop,
kwargs_prop=kwargs_prop,
roi_delay=self.pp_delay_slice,
roi_frames=self.frame_selected,
roi_spectra=self.spec_slice,
roi_pixel=self.x_trace_pixel_slice,
frame_med=self.wCheckFrameMedian.value,
spectra_mean=self.wCheckSpectraMean.value,
medfilt_pixel=self.wIntSliderSmooth.value,
)
return self.data.trace(**kwargs)
def select_traces(self, y_property):
"""Use settings of gui to susbelect data for traces."""
kwargs = dict(
y_property=y_property,
x_property='pp_delays',
roi_delay=self.wRangeSliderPPDelay.slice,
roi_frames=self.frame_selected,
roi_spectra=self.spec_slice,
frame_med=self.wCheckFrameMedian.value,
spectra_mean=self.wCheckSpectraMean.value,
pixel_mean=True,
medfilt_pixel=self.wIntSliderSmooth.value,
)
ret_shape = list(self.data.subselect(**kwargs)[1].shape)
ret_shape[3] = len(self.data.rois_x_pixel_trace)
ret = np.zeros(ret_shape)
for i in range(len(self.data.rois_x_pixel_trace)):
roi_x_pixel = self.data.rois_x_pixel_trace[i]
x, y = self.data.subselect(roi_pixel=roi_x_pixel, **kwargs)
ret[:, :, :, i] = y[:, :, :, 0]
return x, ret
@property
def x_vlines(self):
ret = [self.x_spec[self.x_trace_pixel_slice.start],
self.x_spec[self.x_trace_pixel_slice.stop - 1]]
return ret
class WidgetPlots():
"""Plotly Base plotting backend."""
def __init__(self):
import plotly.graph_objs as go
# Plotly figure obj
self.figure = go.Figure()
# List of plotly data object to plot on the figure
self.data = []
# Plotly layout obj for the figure.
self.layout = go.Layout()
def _update_figure(self):
pass
def _init_figure(self):
pass
class WidgetFigures():
"""Collect figure init and update functions within this class"""
axes_grid = np.array([[]]) # a 2d array with the figure axes
@property
def fig(self):
return self._fig
@property
def axes(self):
return self._fig.axes
def redraw_figure(self):
"""This forces matplotlib to update the figure canvas."""
self._fig.canvas.draw()
for ax in self.axes:
ax.figure.canvas.draw()
def _update_figure(self):
# OVERWRITE THIS FUNCTION
pass
def _update_figure_callback(self, new):
"""A callback version of _update_figure for usage in observers."""
self._update_figure()
def init_single_figure(self):
"""Init the fiures and axes"""
try:
conds = (
Counter(self.axes) != Counter(
self.axes_grid.flatten().tolist()
),
                len(self.axes) == 0
)
if not self._fig:
self._fig, self.axes_grid = plt.subplots(
1, 1, figsize=self._figsize, squeeze=False
)
# This allows for redrawing the axis on an already existing figure.
elif any(conds):
self._fig.set_size_inches(self._figsize, forward=True)
self.axes_grid = np.array([[self._fig.add_subplot(111)]])
except TypeError:
pass
def init_two_figures(self):
"""Init the two axis figure."""
try:
conds = (
Counter(self.axes) != Counter(
self.axes_grid.flatten().tolist()
),
                len(self.axes) == 0
)
if not self._fig:
self._fig, self.axes_grid = plt.subplots(
1, 2, figsize=self._figsize, squeeze=False
)
# This allows for redrawing the axis on an already existing figure.
elif any(conds):
self._fig.set_size_inches(self._figsize, forward=True)
self.axes_grid = np.array([[
self._fig.add_subplot(121),
self._fig.add_subplot(122)
]])
except TypeError:
pass
def _plot_spec(self, xdata, ydata, ax, label_base=""):
"""Plot the basic 4d data types of the data record.
xdata: The x_axis of the plot.
ydata: 4d array.
ax: matplotlib axis."""
initial = True
for delay_index in range(len(ydata)):
delay = ydata[delay_index]
for frame_index in range(len(delay)):
frame = delay[frame_index]
for spectrum_index in range(len(frame)):
spectrum = frame[spectrum_index]
if initial:
initial = False
else:
label_base = ''
label_str = self._append_identifier(label_base).format(
self.pp_delay_selected.start + delay_index,
self.pp_delay_selected.stop,
self.frame_selected.start + frame_index,
self.frame_selected.stop,
self.spec_slice.start + spectrum_index,
self.spec_slice.stop
)
ax.plot(xdata, spectrum, label=label_str)
def _plot_traces(self, xdata, ydata, ax, label_base=''):
initial = True
y = ydata.T
for pixel in y:
for spec in pixel:
for frame in spec:
label_str = label_base + '{:.0f}-{:.0f}'.format(
int(self.data.rois_x_pixel_trace[0].start),
int(self.data.rois_x_pixel_trace[0].stop)
)
ax.plot(xdata, frame.T, '-o', label=label_str)
def _plot_rawData(self, ax):
if not self.wCheckShowRawData.value:
return
self._plot_spec(self.x_spec[self.x_pixel_slice],
self.spectra('rawData'), ax, 'RawData\n')
def _plot_basesubed(self, ax):
if not self.wCheckShowBasesubed.value:
return
self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('basesubed'),
ax, 'Basesubed\n')
def _plot_normalized(self, ax):
if not self.wCheckShowNormalized.value:
return
        self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('normalized'), ax, 'Normalized\n')
def _plot_base(self, ax):
if not self.wCheckShowBase.value:
return
        self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('base'), ax, 'Base\n')
def _plot_norm(self, ax):
if not self.wCheckShowNorm.value:
return
        self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('norm'), ax, 'Norm\n')
def _plot_bleach(self, ax):
if not self.wCheckShowBleach.value:
return
self._plot_spec(
self.x_spec[self.x_pixel_slice],
self.spectra(
'bleach',
kwargs_prop=dict(
opt=self.wDropdownBleachOpt.value,
prop=self.wDropdownBleachProp.value
)
),
ax,
'Bleach\n',
)
def _plot_traces_rawData(self, ax):
if not self.wCheckShowTracesRawData.value:
return
xdata, ydata, yerr = self.trace('rawData')
self._plot_traces(xdata, ydata, ax, 'Raw\n')
def _plot_traces_basesubed(self, ax):
if not self.wCheckShowTracesBasesubed.value:
return
xdata, ydata, yerr = self.trace('basesubed')
self._plot_traces(xdata, ydata, ax, 'Basesubed\n')
def _plot_traces_normalized(self, ax):
if not self.wCheckShowTracesNormalized.value:
return
xdata, ydata, yerr = self.trace('normalized')
self._plot_traces(xdata, ydata, ax, 'Normalized\n')
def _plot_traces_bleach(self, ax):
if not self.wCheckShowTracesBleach.value:
return
xdata, ydata, yerr = self.trace(
'bleach',
kwargs_prop=dict(
opt=self.wDropdownBleachOpt.value,
prop=self.wDropdownBleachProp.value
),
)
self._plot_traces(xdata, ydata, ax, 'Bleach\n')
def plot_spec(self, ax):
self._plot_rawData(ax)
self._plot_basesubed(ax)
self._plot_normalized(ax)
self._plot_base(ax)
self._plot_norm(ax)
self._plot_bleach(ax)
ax.set_title(self._x_spec_title)
ax.set_xlabel(self.x_spec_label)
def plot_traces(self, ax):
self._plot_traces_bleach(ax)
self._plot_traces_rawData(ax)
self._plot_traces_basesubed(ax)
self._plot_traces_normalized(ax)
ax.set_xlabel('pp delay / fs')
ax.set_title('Trace')
ax.legend()
def _on_ax0_lim_changed(self, new=None):
"""Callback for the *Signal* axis."""
# Called when the xlim of the `Signal` plot is changed
self._autoscale_buffer = _lims2buffer(self.axes[0])
def _on_ax1_lim_changed(self, new=None):
# Called when the ylim of the `Signal` plot is changed
self._autoscale_buffer_2 = _lims2buffer(self.axes[1])
@property
def x_spec_label(self):
"""x axis label of the spec plot"""
if self.wDropdownCalib.value == 'wavenumber':
ret = r"Wavenumber/cm$^{-1}$"
elif self.wDropdownCalib.value == 'wavelength':
ret = "Wavelength/nm"
else:
ret = "Pixel"
return ret
@property
def _x_spec_title(self):
"""Title of the spec plot."""
if self.wDropdownDelayMode.value == 'Index':
return "Delay {} fs".format(
self.data.pp_delays[self.pp_delay_selected.start]
)
else:
return "Delay {} - {} fs".format(
self.data.pp_delays[self.data.roi_delay][0],
self.data.pp_delays[self.data.roi_delay][-1]
)
def _append_identifier(self, label_base):
"""Append identifier to label string for plots."""
if self.wCheckDelayMedian.value:
label_base += 'D[{0}:{1}]_'
else:
label_base += 'D[{0}]_'
if self.wCheckFrameMedian.value:
label_base += 'F[{2}:{3}]_'
else:
label_base += 'F[{2}]_'
if self.wCheckSpectraMean.value:
label_base += 'S[{4}:{5}]'
else:
label_base += 'S[{4}]'
return label_base
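        # Illustrative sketch (not part of the original source): with the
        # delay and frame medians enabled and spectra mean disabled, a base
        # of 'RawData\n' becomes 'RawData\nD[{0}:{1}]_F[{2}:{3}]_S[{4}]',
        # which _plot_spec then fills with the selected indices, e.g.
        # 'RawData\nD[0:10]_F[0:5]_S[2]'.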
class BaselineTab(WidgetBase, WidgetFigures):
def __init__(self, figsize=(8, 6), **kwargs):
super().__init__(figsize=figsize, **kwargs)
def _init_widget(self):
"""Init the widgets that are to be shown."""
import ipywidgets as wi
super()._init_widget()
self.wRangeSliderTracePixelX.layout.visibility = 'hidden'
self.wCheckAutoscaleTrace.layout.visibility = 'hidden'
self.wRangeSliderPixelX.layout.visibility = 'hidden'
self.wCheckShowBase.value = False
self.children = wi.VBox([
self._data_box,
self._signal_box,
self._calib_box,
self._save_record_box
])
def _init_figure(self):
"""Init the fiures and axes"""
self.init_single_figure()
def _update_figure(self):
"""Is called on all gui element changes.
This function renders the plot. When ever you want to make changes
visible in the figure you need to call this."""
self._init_figure()
ax = self.axes[0]
ax.clear()
self.plot_spec(ax)
ax.legend(framealpha=0.5)
ax.set_xlabel(self.wDropdownCalib.value)
ax.set_title('Baseline')
ax.callbacks.connect('xlim_changed', self._on_ax0_lim_changed)
ax.callbacks.connect('ylim_changed', self._on_ax0_lim_changed)
if self.wCheckAutoscale.value:
self._autoscale_buffer_2 = _lims2buffer(ax)
else:
_buffer2lims(ax, self._autoscale_buffer_2)
self.redraw_figure()
@property
def to_base(self):
"""Y data to be send on Set Baseline button press."""
return self.spectra('rawData')
class IRTab(WidgetBase, WidgetFigures):
"""Widget to visualize IRTab type data.
IRTab Type data has a SfrRecord and a BaselineTab """
def __init__(self, figsize=(8, 6), **kwargs):
super().__init__(figsize=figsize, **kwargs)
def _init_widget(self):
"""Init the widgets that are to be shown."""
import ipywidgets as wi
super()._init_widget()
        # This allows the data to be used for normalization from the start.
self.data.rawData += 1
self.wRangeSliderTracePixelX.layout.visibility = 'hidden'
self.wCheckAutoscaleTrace.layout.visibility = 'hidden'
self.wRangeSliderPixelX.layout.visibility = 'hidden'
self.wCheckShowBase.value = False
show_box = wi.HBox([
self.wCheckShowRawData,
self.wCheckShowBasesubed,
self.wCheckShowBase,
])
self.children = wi.VBox([
self._data_box,
self._signal_box,
self.wTextBaselineOffset,
show_box,
self._calib_box,
self._save_record_box
])
def _init_figure(self):
"""Init the fiures and axes"""
self.init_single_figure()
def _update_figure(self):
"""Is called on all gui element changes.
This function renders the plot. When ever you want to make changes
visible in the figure you need to call this."""
self._init_figure()
ax = self.axes[0]
ax.clear()
self.plot_spec(ax)
ax.legend(framealpha=0.5)
ax.set_xlabel(self.wDropdownCalib.value)
ax.set_title('Spectrum')
ax.callbacks.connect('xlim_changed', self._on_ax0_lim_changed)
ax.callbacks.connect('ylim_changed', self._on_ax0_lim_changed)
if self.wCheckAutoscale.value:
self._autoscale_buffer_2 = _lims2buffer(ax)
else:
_buffer2lims(ax, self._autoscale_buffer_2)
self.redraw_figure()
def _init_observer(self):
super()._init_observer()
@property
def to_norm(self):
"""The property that gets exported to the Record tab if one clickes.
Send IR."""
return self.spectra('basesubed')
class RecordTab(WidgetBase, WidgetFigures):
def __init__(self, central_wl=674, vis_wl=812, figsize=(10, 4), **kwargs):
"""Plotting gui based on the SfgRecord class as a data backend.
Parameters
----------
data : Optional, SfgRecord obj.
Default dataset to start with. If None, an empty one is created.
fig: Optional, matplotlib figure
The figure to draw on.
Defaults to create a new one.
ax: Optional, matplotlib axes. The axes to draw on.
Defaults to create a new one.
central_wl: float
Central wavelength of the camera to start with.
            If none is given, it tries to find it out by investigating the
metadata.
vis_wl: float
Wavelength of the visible to begin with.
Example:
-------
test = RecordTab()
test()
# Type the Folder you want to investigate in the folder Text box and
# press RETURN.
# A list of selectable files will appear on the right side.
"""
super().__init__(central_wl=central_wl, vis_wl=vis_wl, figsize=figsize,
**kwargs)
self._ax_xlim = None
self._ax_ylim = None
def _init_figure(self):
"""Init the two axis figure."""
self.init_two_figures()
# TODO Axes is too small on summed
def _init_widget(self):
"""Init all widgets that are to be drawn."""
import ipywidgets as wi
super()._init_widget()
        # self.children is the widget we are rendering upon call.
show_box = wi.VBox([
wi.HBox([
self.wSnapXRoi,
self.wTextBaselineOffset,
]),
wi.HBox([
self.wCheckShowRawData,
self.wCheckShowBasesubed,
self.wCheckShowBase,
self.wCheckShowNorm,
self.wCheckShowNormalized,
]),
wi.VBox([
wi.Label("Bleach:"),
wi.HBox([
self.wCheckShowBleach,
self.wDropdownBleachOpt,
self.wDropdownBleachProp,
]),
]),
wi.VBox([
wi.Label("Traces:"),
wi.HBox([
self.wCheckShowTracesRawData,
self.wCheckShowTracesBasesubed,
self.wCheckShowTracesNormalized,
self.wCheckShowTracesBleach,
]),
]),
])
bleach_box = wi.HBox([
self.wIntTextPumped,
self.wIntTextUnpumped,
self.wCheckShowZeroTimeSubtraction,
self.wRangeZeroTime,
])
self.children = wi.VBox([
self._data_box,
self._signal_box,
show_box,
bleach_box,
self._calib_box,
self._save_record_box
])
def _update_figure(self):
"""Update the figure of the gui."""
self._init_figure()
fontsize = 8
ax = self.axes[0]
ax.clear()
self.plot_spec(ax)
ax.set_xticklabels(ax.get_xticks(), fontsize=fontsize)
ax.set_yticklabels(ax.get_yticks(), fontsize=fontsize)
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
ax.callbacks.connect('xlim_changed', self._on_ax0_lim_changed)
ax.callbacks.connect('ylim_changed', self._on_ax0_lim_changed)
if self.wCheckAutoscale.value:
self._autoscale_buffer = _lims2buffer(ax)
else:
_buffer2lims(ax, self._autoscale_buffer)
ax.vlines(self.x_vlines, *ax.get_ylim(),
linestyle="dashed")
ax.legend(framealpha=0.5)
ax = self.axes[1]
ax.clear()
self.plot_traces(ax)
ax.set_xticklabels(ax.get_xticks(), fontsize=fontsize)
ax.set_yticklabels(ax.get_yticks(), fontsize=fontsize)
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3g'))
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
ax.yaxis.tick_right()
ax.callbacks.connect('xlim_changed', self._on_ax1_lim_changed)
ax.callbacks.connect('ylim_changed', self._on_ax1_lim_changed)
if self.wCheckAutoscaleTrace.value:
self._autoscale_buffer_2 = _lims2buffer(ax)
else:
_buffer2lims(ax, self._autoscale_buffer_2)
self.redraw_figure()
# This is broken
class ImgView(WidgetBase):
"""A Class to view full spe images."""
def __init__(self, *args, figsize=(8, 6), **kwargs):
super().__init__(*args, figsize=figsize, **kwargs)
self.axes_grid = np.array([[]])
def _init_figure(self):
if not self._fig:
self._fig = plt.figure(self._figsize)
gs = gridspec.GridSpec(2, 2, width_ratios=[1, 3],
height_ratios=[3, 1])
ax = self._fig.add_subplot(gs[0, 1])
self._fig.add_subplot(gs[0, 0], sharey=ax)
self._fig.add_subplot(gs[1, 1], sharex=ax)
        elif self._fig and len(self.axes) != 3:
self._fig.set_size_inches(self._figsize, forward=True)
gs = gridspec.GridSpec(2, 2, width_ratios=[1, 3],
height_ratios=[3, 1])
ax = self._fig.add_subplot(gs[0, 1])
self._fig.add_subplot(gs[0, 0], sharey=ax)
self._fig.add_subplot(gs[1, 1], sharex=ax)
def _update_figure(self):
self._init_figure()
view_data = self.data.rawData[
self.pp_delay_index, self.frame_index
]
ax = self.axes[0]
plt.sca(ax)
ax.clear()
img = ax.imshow(
view_data,
interpolation=self.w_interpolate.value,
origin="lower",
aspect="auto"
)
plt.colorbar(img)
axl = self.axes[1]
axl.clear()
y_slice = self.wRangeSliderPixelY.slice
view_data2 = self.data.rawData[
self.pp_delay_selected.start, self.wRangeSliderFrame.value[0], y_slice
].sum(Y_PIXEL_INDEX)
axl.plot(view_data2)
def _init_widget(self):
import ipywidgets as wi
super()._init_widget()
self.wIntSliderSmooth.visible = False
self.wIntSliderSmooth.disabled = True
self.wRangeSliderPPDelay.visible = False
self.wRangeSliderPPDelay.disabled = True
self.w_interpolate = wi.Dropdown(
description="Interpolation",
options=('none', 'nearest', 'bilinear', 'bicubic',
'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel',
'mitchell', 'sinc', 'lanczos'),
value="nearest",
)
self.children = wi.VBox([
self.wVBoxSignal,
wi.HBox(
[self.wDropdownCalib, self.wTextCentralWl, self.wTextVisWl]
),
self.w_interpolate,
])
def _init_observer(self):
super()._init_observer()
self.w_interpolate.observe(self._update_figure_callback, "value")
class Dashboard():
def __init__(self, *args, **kwargs):
self.widgets = args
self.fig = None
self.ax = None
def __call__(self):
pass
class PumpProbe():
"""Tabed dashboard.
The first page shows two axis.
On the first axes one sees the raw signal. And possibly
a baseline. Each y-pixel of the ccd camera gets projected into a single.
spectra line on this first axes. With the *Show Baseline* Button one can
toggle the visibility of the Baseline. Autoscale prevents the axes from
re-scaling up on data change. Numorus sliders allow for inspection of the
data.
The second axes shows the Trace of each spectrum vs pump-probe time delay.
This is only use full if you do pump-probe experiment. Otherwise this axis
will only show to you the a single point with the value of the sum(area) of
the spectrum from axes one.
The second page shows A single Spectrum and possibly a baseline.
The third page shows, after usage of the normalize button the quotient
of the first and the second page spectrum. This allows for IRTab
Normalization."""
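    # Minimal usage sketch (assumes an interactive matplotlib backend in a
    # Jupyter notebook; not part of the original source):
    #
    #   dashboard = PumpProbe()
    #   dashboard()  # registers the observers and displays the tab widget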
def __init__(self):
import ipywidgets as wi
self.tabed_widgets = (
RecordTab(),
IRTab(),
BaselineTab(),
)
self.tab_record = self.tabed_widgets[0]
self.tab_ir = self.tabed_widgets[1]
self.tab_record_baseline = self.tabed_widgets[2]
children = []
self.wi_fig = plt.figure()
        # Names given explicitly to preserve the order of tabs.
for tabed_widget in self.tabed_widgets:
tabed_widget._conf_widget_with_data()
children.append(tabed_widget.children)
tabed_widget._fig = self.wi_fig
self.w_tabs = wi.Tab(children=children)
names = ("Pump-Probe", "IR", "Baseline")
for i in range(len(names)):
self.w_tabs.set_title(i, names[i])
self.children = self.w_tabs
self.wButtonSetBaseline = wi.Button(description='Set Baseline')
self.wButtonSetIrBaseline = wi.Button(description='Set Ir Baseline')
self.wButtonNormalize = wi.Button(description='Set Normalize')
self.wButtonSaveGui = wi.Button(description='Save Gui')
self.wButtonLoadGui = wi.Button(description='Load Gui')
self.children = wi.VBox([
self.w_tabs,
wi.HBox([
self.wButtonSetBaseline,
self.wButtonSetIrBaseline,
self.wButtonNormalize,
self.wButtonSaveGui,
self.wButtonLoadGui,
])
])
def __call__(self):
from IPython.display import display
for tabed_widget in self.tabed_widgets:
tabed_widget._init_observer()
self._init_observer()
# Render record tab as default.
self.tab_record._update_figure()
display(self.children)
def _init_observer(self):
"""Initialize widgets of the GUI.
Observers within this function interact between tabs, of independent
of tabs.
"""
if debug:
print("Dasboards._init_observer called")
def test_widgets(tab):
"""List of widgets of a tab that change the data such that,
test must be run before Ir or Baseline can be set."""
return (
tab.wSliderPPDelay,
tab.wRangeSliderPPDelay,
tab.wCheckDelayMedian,
tab.wDropdownDelayMode,
tab.wSliderFrame,
tab.wRangeSliderFrame,
tab.wCheckFrameMedian,
tab.wDropdownFrameMode,
tab.wIntSliderSmooth,
tab.wRangeSliderPixelY,
tab.wIntTextPixelYStep,
tab.wSelectFile,
)
self.w_tabs.observe(self._on_tab_changed, 'selected_index')
self.wButtonSetBaseline.on_click(self._on_setBaseline_clicked)
self.wButtonSetIrBaseline.on_click(self._on_setIRBaseline_clicked)
self.wButtonNormalize.on_click(self._on_set_normalize)
for widget in test_widgets(self.tab_ir):
widget.observe(
self._test_normalizability,
"value"
)
for widget in test_widgets(self.tab_record_baseline):
widget.observe(self._test_Record_baseline, "value")
widget.observe(self._test_IR_Baseline, "value")
self.tab_record_baseline.wSelectFile.observe(
self._test_Record_baseline,
"value"
)
self.tab_record_baseline.wSelectFile.observe(
self._test_normalizability,
"value"
)
self.wButtonSaveGui.on_click(self._on_save_gui_clicked)
self.wButtonLoadGui.on_click(self._on_load_gui_clicked)
def _on_tab_changed(self, new):
if debug:
print("Dashboard._on_tab_changed called")
axes = self.wi_fig.axes
for ax in axes:
self.wi_fig.delaxes(ax)
page = self.w_tabs.selected_index
widget = self.tabed_widgets[page]
widget._update_figure()
def _on_setBaseline_clicked(self, new):
"""Called when set baseline is clicked."""
if not self._test_baseline_on_tab(
self.tab_record, self.wButtonSetBaseline
):
return
self.tab_record.data.base = self.tab_record_baseline.to_base
self.wButtonSetBaseline.style.button_color = "green"
self.tabed_widgets[self.w_tabs.selected_index]._update_figure()
def _on_setIRBaseline_clicked(self, new):
"""Called when set ir baseline is clicked."""
if not self._test_baseline_on_tab(
self.tab_ir, self.wButtonSetIrBaseline
):
return
self.tab_ir.data.base = self.tab_record_baseline.to_base
self.wButtonSetIrBaseline.style.button_color = "green"
self.tabed_widgets[self.w_tabs.selected_index]._update_figure()
def _on_set_normalize(self, new):
if debug:
print("Normalize._on_set_normalize called.")
if not self._test_normalizability():
return
self.tab_record.data.norm = self.tab_ir.to_norm
# Update current plot
self.wButtonNormalize.style.button_color = "green"
self.tabed_widgets[self.w_tabs.selected_index]._update_figure()
def _on_save_gui_clicked(self, new):
"""Save gui status to a json text file.
        Each tab of the dashboard gets a separate list entry. Each widget value
        is saved as a dictionary of widget names and values."""
save_file = self.tab_record.folder + '/.last_state.json'
with open(save_file, 'w') as outfile:
save_list = []
for i in range(len(self.tabed_widgets)):
w = self.tabed_widgets[i]
save_dict = {}
for name, saveable_widget in w._save_widgets.items():
save_dict[name] = saveable_widget.value
save_list.append(save_dict)
dump(save_list, outfile, indent=4,
separators=(',', ': '), sort_keys=True)
def _on_load_gui_clicked(self, new):
def _pop_and_set(name):
value = saved_values.pop(name)
w._save_widgets[name].value = value
def _read_and_set(name):
value = saved_values.get(name)
widget = w._save_widgets.get(name)
if isinstance(value, type(None)):
return
if isinstance(widget, type(None)):
return
widget.value = value
try:
infile = open(self.tab_record.folder + '/.last_state.json', 'r')
except FileNotFoundError:
pass
else:
with infile:
imp = load(infile)
# Loop over tabs
for i in range(len(self.tabed_widgets)):
saved_values = imp[i]
w = self.tabed_widgets[i]
                # Read folder, file and baseline first.
_pop_and_set('folder')
w._on_folder_submit(None)
_pop_and_set('file')
w._load_data()
w._unobserve_figure()
for name in saved_values.keys():
try:
_read_and_set(name)
except TraitError:
msg = "Can't load {} with value {}".format(
name,
saved_values[name]
)
print(msg)
break
w._init_figure_observers()
self._on_tab_changed(None)
def _test_normalizability(self, new=None):
"""Test if the data of w1 can be used to normalize the data of w0."""
try:
norm = np.ones_like(self.tab_record.data.rawData) *\
self.tab_ir.to_norm
self.wButtonNormalize.style.button_color = 'orange'
if np.all(self.tab_record.data.norm == norm):
self.wButtonNormalize.style.button_color = 'green'
except ValueError:
self.wButtonNormalize.style.button_color = 'red'
return False
return True
def _test_baseline_on_tab(self, tab, button):
"""Test if baseline data of tab can be setted.
tab: tab to set the baselinedata of
button: button that was clicked and that sould be colored accordingly.
"""
try:
base = np.ones_like(tab.data.rawData) *\
self.tab_record_baseline.to_base
button.style.button_color = 'orange'
# Must use _base here because .base has offset correction.
if isinstance(tab.data._base, type(None)):
return True
if np.all(tab.data._base == base):
button.style.button_color = 'green'
except ValueError:
button.style.button_color = 'red'
return False
return True
def _test_IR_Baseline(self, new=None):
return self._test_baseline_on_tab(
self.tab_ir,
self.wButtonSetIrBaseline
)
def _test_Record_baseline(self, new=None):
return self._test_baseline_on_tab(
self.tab_record,
self.wButtonSetBaseline
)
# #### Helper function
def _filter_fnames(folder_path):
"""Return list of known files in a folder."""
fnames = np.sort(glob(os.path.normcase(folder_path + '/*')))
# Only .dat, .spe and .npz are known
mask = [
any(conds) for conds in zip(
[".dat" in s for s in fnames],
[".spe" in s for s in fnames],
[".npz" in s for s in fnames],
)
]
fnames = fnames[np.where(mask)]
# Remove AVG
fnames = fnames[np.where(["AVG" not in s for s in fnames])]
fnames = [os.path.split(elm)[1] for elm in fnames]
return fnames
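# Illustrative sketch of _filter_fnames (hypothetical folder contents, not
# from the original source): a folder holding 'a.spe', 'a_AVG.spe', 'b.dat'
# and 'notes.txt' yields ['a.spe', 'b.dat'] -- only the known extensions
# survive, and anything containing 'AVG' is dropped.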
def _slider_range_to_slice(range_value_tuple, max):
"""Transform a tuple into a slice accounting for overlapping"""
if range_value_tuple[0] != range_value_tuple[1]:
return slice(*range_value_tuple)
if range_value_tuple[1] != max:
return slice(range_value_tuple[0], range_value_tuple[1]+1)
return slice(range_value_tuple[0]-1, range_value_tuple[1])
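# Worked examples for _slider_range_to_slice (illustrative, not from the
# original source), assuming max=10:
#   _slider_range_to_slice((2, 7), 10)   -> slice(2, 7)   # normal range
#   _slider_range_to_slice((3, 3), 10)   -> slice(3, 4)   # widened forward
#   _slider_range_to_slice((10, 10), 10) -> slice(9, 10)  # widened backward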
def _slider_int_to_slice(slider):
return slice(slider.value, slider.value+1)
def to_slice(attribute):
    # This can be used as a decorator to get slices from range widgets.
    # I'm currently not using it, because I think it's more complicated
    # than explicitly calling the rangeSlider_to_slice function on the
    # sliders.
def _to_slice(f):
def wrapper(self, *args):
widget = getattr(self, attribute)
return slice(*widget.value)
return wrapper
return _to_slice
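# Hedged sketch of how the (currently unused) to_slice decorator could be
# applied; the class and attribute names are hypothetical:
#
#   class SomeGui:
#       wRangeSliderFrame = ...  # a widget with a (start, stop) .value
#
#       @property
#       @to_slice('wRangeSliderFrame')
#       def frame_slice(self):
#           pass  # body is ignored; the wrapper slices the named widget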
def _lims2buffer(ax):
"""Set buffer values according to axis"""
buffer = [None, None]
buffer[0] = list(ax.get_xlim())
buffer[1] = list(ax.get_ylim())
return buffer
def _buffer2lims(ax, buffer):
if not isinstance(buffer[0], type(None)):
ax.set_xlim(*buffer[0])
if not isinstance(buffer[1], type(None)):
ax.set_ylim(*buffer[1])
def _set_rangeSlider_num_to_label(lines, sliceObj, label_base=""):
"""Use a rangeSlider, to add rangeSlider values to label_base
lines: The lines to set the label of.
y_slice: The rangeSlider to extract values from
label_base: base string of the label that the number is appended to."""
j = 0
for i in range(*sliceObj.indices(sliceObj.stop)):
label = label_base + str(i)
line = lines[j]
line.set_label(label)
j += 1
class IntRangeSliderGap(IntRangeSlider):
"""A Ranged slider with enforced gap."""
@validate('value')
def enforce_gap(self, proposal):
gap = 1
min, max = proposal.value
oldmin, oldmax = self.value
if min == self.max:
min -= 1
if (max-min) < gap:
if oldmin == min:
# max changed
max = min + gap
else:
min = max - gap
return (min, max)
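    # Behavior sketch (illustrative, not from the original source): with a
    # current value of (3, 5), a proposed (5, 5) violates the gap; since min
    # is the handle that moved (oldmin != min), min is pulled back to
    # max - gap, and the accepted value is (4, 5) (assuming the slider
    # maximum is above 5).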
@property
def slice(self):
return slice(*self.value)
#### End of helper functions
|
deisi/SFG2D
|
sfg2d/widgets.py
|
Python
|
mit
| 66,147
|
[
"Gaussian"
] |
e34dab4bf4e9f6e3ea9f3bed68d124a008785750345bc2e3b7365784fa8f8cb2
|
"""Unit tests for reviewboard.accounts.managers.ReviewRequestVisitManager."""
from django.contrib.auth.models import User
from reviewboard.accounts.models import ReviewRequestVisit
from reviewboard.testing import TestCase
class ReviewRequestVisitTests(TestCase):
"""Unit tests for reviewboard.accounts.managers.ReviewRequestVisitManager.
"""
fixtures = ['test_users']
def test_update_visibility_create(self):
"""Testing ReviewRequestVisitManager.update_visibility
creates a new visit
"""
review_request = self.create_review_request(publish=True)
user = User.objects.get(username='admin')
visit = ReviewRequestVisit.objects.update_visibility(
review_request, user, ReviewRequestVisit.ARCHIVED)
self.assertEqual(visit.visibility, ReviewRequestVisit.ARCHIVED)
def test_update_visibility_update_visible(self):
"""Testing ReviewRequestVisitManager.update_visibility
updates existing visit with visible
"""
review_request = self.create_review_request(publish=True)
user = User.objects.get(username='admin')
ReviewRequestVisit.objects.create(
review_request=review_request, user=user,
visibility=ReviewRequestVisit.VISIBLE)
with self.assertNumQueries(1):
visit = ReviewRequestVisit.objects.update_visibility(
review_request, user, ReviewRequestVisit.VISIBLE)
self.assertEqual(visit.visibility, ReviewRequestVisit.VISIBLE)
def test_update_visibility_update_archive(self):
"""Testing ReviewRequestVisitManager.update_visibility
updates existing visit with archive
"""
review_request = self.create_review_request(publish=True)
user = User.objects.get(username='admin')
ReviewRequestVisit.objects.create(
review_request=review_request, user=user,
visibility=ReviewRequestVisit.VISIBLE)
visit = ReviewRequestVisit.objects.update_visibility(
review_request, user, ReviewRequestVisit.ARCHIVED)
self.assertEqual(visit.visibility, ReviewRequestVisit.ARCHIVED)
|
reviewboard/reviewboard
|
reviewboard/accounts/test_review_request_visit_manager.py
|
Python
|
mit
| 2,167
|
[
"VisIt"
] |
548128721fabc460cd611092fc5d6d684f4db29cd4cd029edf5ca06c0c6e1f92
|
#!/usr/bin/env python
"""Create 3-Layers BEM model from Flash MRI images
This function extracts the BEM surfaces (outer skull, inner skull, and
outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30
degrees. The multiecho FLASH data are inputted in NIFTI format.
It was developed to work for Phillips MRI data, but could probably be
used for data from other scanners that have been converted to NIFTI format
(e.g., using MRIcron's dcm2nii). However,it has been tested only for
data from the Achieva scanner). This function assumes that the Freesurfer
segmentation of the subject has been completed. In particular, the T1.mgz
and brain.mgz MRI volumes should be, as usual, in the subject's mri
directory.
"""
from __future__ import print_function
# Authors: Rey Rene Ramirez, Ph.D. e-mail: rrramir at uw.edu
# Alexandre Gramfort, Ph.D.
import math
import os
import mne
def make_flash_bem(subject, subjects_dir, flash05, flash30, show=False):
"""Create 3-Layers BEM model from Flash MRI images
Parameters
----------
subject : string
Subject name
subjects_dir : string
Directory containing subjects data (Freesurfer SUBJECTS_DIR)
flash05 : string
Full path of the NIFTI file for the
FLASH sequence with a spin angle of 5 degrees
flash30 : string
Full path of the NIFTI file for the
FLASH sequence with a spin angle of 30 degrees
show : bool
Show surfaces in 3D to visually inspect all three BEM
surfaces (recommended)
Notes
-----
This program assumes that both Freesurfer/FSL, and MNE,
including MNE's Matlab Toolbox, are installed properly.
For reference please read the MNE manual and wiki, and Freesurfer's wiki:
http://www.nmr.mgh.harvard.edu/meg/manuals/
http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/sofMNE.php
http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/index.php
http://surfer.nmr.mgh.harvard.edu/
http://surfer.nmr.mgh.harvard.edu/fswiki
References:
B. Fischl, D. H. Salat, A. J. van der Kouwe, N. Makris, F. Segonne,
B. T. Quinn, and A. M. Dale, "Sequence-independent segmentation of magnetic
resonance images," Neuroimage, vol. 23 Suppl 1, pp. S69-84, 2004.
J. Jovicich, S. Czanner, D. Greve, E. Haley, A. van der Kouwe, R. Gollub,
D. Kennedy, F. Schmitt, G. Brown, J. Macfall, B. Fischl, and A. Dale,
"Reliability in multi-site structural MRI studies: effects of gradient
non-linearity correction on phantom and human data," Neuroimage,
    vol. 30, pp. 436-43, 2006.
"""
os.environ['SUBJECT'] = subject
os.chdir(os.path.join(subjects_dir, subject, "mri"))
if not os.path.exists('flash'):
os.mkdir("flash")
os.chdir("flash")
# flash_dir = os.getcwd()
if not os.path.exists('parameter_maps'):
os.mkdir("parameter_maps")
print("--- Converting Flash 5")
os.system('mri_convert -flip_angle %s -tr 25 %s mef05.mgz' %
(5 * math.pi / 180, flash05))
print("--- Converting Flash 30")
os.system('mri_convert -flip_angle %s -tr 25 %s mef30.mgz' %
(30 * math.pi / 180, flash30))
print("--- Running mne_flash_bem")
os.system('mne_flash_bem --noconvert')
os.chdir(os.path.join(subjects_dir, subject, 'bem'))
if not os.path.exists('flash'):
os.mkdir("flash")
os.chdir("flash")
print("[done]")
if show:
fnames = ['outer_skin.surf', 'outer_skull.surf', 'inner_skull.surf']
head_col = (0.95, 0.83, 0.83) # light pink
skull_col = (0.91, 0.89, 0.67)
brain_col = (0.67, 0.89, 0.91) # light blue
colors = [head_col, skull_col, brain_col]
from enthought.mayavi import mlab
mlab.clf()
for fname, c in zip(fnames, colors):
points, faces = mne.read_surface(fname)
mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces,
color=c, opacity=0.3)
mlab.show()
if __name__ == '__main__':
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
subject = os.environ.get('SUBJECT')
subjects_dir = os.environ.get('SUBJECTS_DIR')
parser.add_option("-s", "--subject", dest="subject",
help="Subject name", default=subject)
parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
help="Subjects directory", default=subjects_dir)
parser.add_option("-5", "--flash05", dest="flash05",
help=("Path to FLASH sequence with a spin angle of 5 "
"degrees in Nifti format"), metavar="FILE")
parser.add_option("-3", "--flash30", dest="flash30",
help=("Path to FLASH sequence with a spin angle of 30 "
"degrees in Nifti format"), metavar="FILE")
parser.add_option("-v", "--view", dest="show", action="store_true",
help="Show BEM model in 3D for visual inspection",
default=False)
options, args = parser.parse_args()
subject = options.subject
subjects_dir = options.subjects_dir
flash05 = os.path.abspath(options.flash05)
flash30 = os.path.abspath(options.flash30)
show = options.show
make_flash_bem(subject, subjects_dir, flash05, flash30, show=show)
|
jaeilepp/eggie
|
mne/commands/mne_flash_bem_model.py
|
Python
|
bsd-2-clause
| 5,454
|
[
"Mayavi"
] |
f7fcf89ca8b5b3043ccd1513bf3f31deb96d01ac43c185dc7baebb20ac25f41a
|
# GromacsWrapper: scaling.py
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`gromacs.scaling` -- Partial tempering
===========================================
:Author: Jan Domanski, @jandom
.. versionadded:: 0.5.0
Helper functions for scaling gromacs topologies; useful for setting up
simulations with Hamiltonian replica exchange and partial tempering
(REST2).
.. autofunction:: scale_dihedrals
.. autofunction:: scale_impropers
.. autofunction:: partial_tempering
"""
from __future__ import absolute_import, division, print_function
import math
import copy
import logging
import numpy as np
from .fileformats import TOP
from .fileformats import blocks
logger = logging.getLogger("gromacs.scaling")
def scale_dihedrals(mol, dihedrals, scale, banned_lines=None):
"""Scale dihedral angles"""
if banned_lines is None:
banned_lines = []
new_dihedrals = []
for dh in mol.dihedrals:
atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype(), dh.atom4.get_atomtype()
atypes = [a.replace("_", "").replace("=","") for a in atypes]
# special-case: this is a [ dihedral ] override in molecule block, continue and don't match
if dh.gromacs['param'] != []:
for p in dh.gromacs['param']:
                p['kchi'] *= scale
new_dihedrals.append(dh)
continue
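        # The 32 iterations enumerate every way this dihedral can match a
        # dihedraltype entry: bit 0 reverses the atom order (A-B-C-D is
        # equivalent to D-C-B-A) and bits 1-4 replace a1..a4 with the
        # wildcard "X". The exact (wildcard-free) keys are tried first, and
        # the search stops at the first key found in *dihedrals*.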
for iswitch in range(32):
            if iswitch % 2 == 0:
                a1, a2, a3, a4 = atypes[0], atypes[1], atypes[2], atypes[3]
            else:
                a1, a2, a3, a4 = atypes[3], atypes[2], atypes[1], atypes[0]
            if (iswitch // 2) % 2 == 1:
                a1 = "X"
            if (iswitch // 4) % 2 == 1:
                a2 = "X"
            if (iswitch // 8) % 2 == 1:
                a3 = "X"
            if (iswitch // 16) % 2 == 1:
                a4 = "X"
key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs['func'])
if (key in dihedrals):
for i, dt in enumerate(dihedrals[key]):
dhA = copy.deepcopy(dh)
param = copy.deepcopy(dt.gromacs['param'])
# Only check the first dihedral in a list
if not dihedrals[key][0].line in banned_lines:
for p in param: p['kchi'] *= scale
dhA.gromacs['param'] = param
#if key == "CT3-C-NH1-CT1-9": print i, dt, key
if i == 0:
dhA.comment = "; banned lines {0} found={1}\n".format(" ".join(
map(str, banned_lines)), 1 if dt.line in banned_lines else 0)
dhA.comment += "; parameters for types {}-{}-{}-{}-9 at LINE({})\n".format(
dhA.atom1.atomtype, dhA.atom2.atomtype, dhA.atom3.atomtype,
dhA.atom4.atomtype, dt.line).replace("_","")
name = "{}-{}-{}-{}-9".format(dhA.atom1.atomtype, dhA.atom2.atomtype,
dhA.atom3.atomtype, dhA.atom4.atomtype).replace("_","")
#if name == "CL-CTL2-CTL2-HAL2-9": print dihedrals[key], key
new_dihedrals.append(dhA)
break
mol.dihedrals = new_dihedrals
#assert(len(mol.dihedrals) == new_dihedrals)
return mol
def scale_impropers(mol, impropers, scale, banned_lines=None):
"""Scale improper dihedrals"""
if banned_lines is None:
banned_lines = []
new_impropers = []
for im in mol.impropers:
atypes = (im.atom1.get_atomtype(), im.atom2.get_atomtype(),
im.atom3.get_atomtype(), im.atom4.get_atomtype())
atypes = [a.replace("_", "").replace("=", "") for a in atypes]
# special-case: this is a [ dihedral ] override in molecule block, continue and don't match
if im.gromacs['param'] != []:
for p in im.gromacs['param']:
p['kpsi'] *= scale
new_impropers.append(im)
continue
for iswitch in range(32):
            if iswitch % 2 == 0:
                a1, a2, a3, a4 = atypes[0], atypes[1], atypes[2], atypes[3]
            else:
                a1, a2, a3, a4 = atypes[3], atypes[2], atypes[1], atypes[0]
            if (iswitch // 2) % 2 == 1:
                a1 = "X"
            if (iswitch // 4) % 2 == 1:
                a2 = "X"
            if (iswitch // 8) % 2 == 1:
                a3 = "X"
            if (iswitch // 16) % 2 == 1:
                a4 = "X"
key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs['func'])
if (key in impropers):
for i, imt in enumerate(impropers[key]):
imA = copy.deepcopy(im)
param = copy.deepcopy(imt.gromacs['param'])
# Only check the first dihedral in a list
if not impropers[key][0].line in banned_lines:
for p in param: p['kpsi'] *= scale
imA.gromacs['param'] = param
if i == 0:
imA.comment = "; banned lines {0} found={1}\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\n".format(
" ".join(map(str, banned_lines)),
1 if imt.line in banned_lines else 0,
imt.atype1, imt.atype2, imt.atype3, imt.atype4, imt.line)
new_impropers.append(imA)
break
#assert(len(mol.impropers) == new_impropers)
mol.impropers = new_impropers
return mol
def partial_tempering(topfile="processed.top", outfile="scaled.top", banned_lines='',
scale_lipids=1.0, scale_protein=1.0):
"""Set up topology for partial tempering (REST2) replica exchange.
.. versionchanged:: 0.7.0
Use keyword arguments instead of an `args` Namespace object.
"""
    # list() so repeated membership tests work under Python 3, where map()
    # returns a one-shot iterator.
    banned_lines = list(map(int, banned_lines.split()))
top = TOP(topfile)
groups = [("_", float(scale_protein)), ("=", float(scale_lipids))]
#
# CMAPTYPES
#
cmaptypes = []
for ct in top.cmaptypes:
cmaptypes.append(ct)
for gr, scale in groups:
ctA = copy.deepcopy(ct)
ctA.atype1 += gr
ctA.atype2 += gr
ctA.atype3 += gr
ctA.atype4 += gr
ctA.atype8 += gr
ctA.gromacs['param'] = [ v*scale for v in ct.gromacs['param'] ]
cmaptypes.append(ctA)
logger.debug("cmaptypes was {0}, is {1}".format(len(top.cmaptypes), len(cmaptypes)))
top.cmaptypes = cmaptypes
#
# ATOMTYPES
#
atomtypes = []
for at in top.atomtypes:
atomtypes.append(at)
for gr, scale in groups:
atA = copy.deepcopy(at)
atA.atnum = atA.atype
atA.atype += gr
atA.gromacs['param']['lje'] *= scale
atomtypes.append(atA)
top.atomtypes = atomtypes
#
# PAIRTYPES
#
pairtypes = []
for pt in top.pairtypes:
pairtypes.append(pt)
for gr, scale in groups:
ptA = copy.deepcopy(pt)
ptA.atype1 += gr
ptA.atype2 += gr
ptA.gromacs['param']['lje14'] *= scale
pairtypes.append(ptA)
top.pairtypes = pairtypes
#
# BONDTYPES
#
bondtypes = []
for bt in top.bondtypes:
bondtypes.append(bt)
for gr, scale in groups:
btA = copy.deepcopy(bt)
btA.atype1 += gr
btA.atype2 += gr
bondtypes.append(btA)
top.bondtypes = bondtypes
#
# ANGLETYPES
#
angletypes = []
for at in top.angletypes:
angletypes.append(at)
for gr, scale in groups:
atA = copy.deepcopy(at)
atA.atype1 += gr
atA.atype2 += gr
atA.atype3 += gr
angletypes.append(atA)
top.angletypes = angletypes
#
# Build dihedral dictionary
#
dihedraltypes = {}
for dt in top.dihedraltypes:
dt.disabled = True
dt.comment = "; type={0!s}-{1!s}-{2!s}-{3!s}-9\n; LINE({4:d}) ".format(
dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.line)
dt.comment = dt.comment.replace("_","")
#if "X-CTL2-CTL2-X-9" in dt.comment: print dt
name = "{0}-{1}-{2}-{3}-{4}".format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs['func'])
if not name in dihedraltypes:
dihedraltypes[name] = []
dihedraltypes[name].append(dt)
logger.debug("Build dihedraltypes dictionary with {0} entries".format(len(dihedraltypes)))
#
# Build improper dictionary
#
impropertypes = {}
for it in top.impropertypes:
it.disabled = True
it.comment = "; LINE({0:d}) ".format(it.line)
name = "{0}-{1}-{2}-{3}-{4}".format(
it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs['func'])
if not name in impropertypes:
impropertypes[name] = []
impropertypes[name].append(it)
logger.debug("Build impropertypes dictionary with {0} entries".format(len(impropertypes)))
for molname_mol in top.dict_molname_mol:
if not 'Protein' in molname_mol:
continue
mol = top.dict_molname_mol[molname_mol]
for at in mol.atoms:
at.charge *= math.sqrt(scale_protein)
mol = scale_dihedrals(mol, dihedraltypes, scale_protein, banned_lines)
mol = scale_impropers(mol, impropertypes, 1.0, banned_lines)
top.write(outfile)
|
Becksteinlab/GromacsWrapper
|
gromacs/scaling.py
|
Python
|
gpl-3.0
| 11,438
|
[
"Gromacs"
] |
f23762cbb629a584aa390f65edf463c0124a2d456fd965901107dd93e400f938
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import numpy as np
import lsst.geom
import lsst.pipe.base as pipeBase
import lsst.pex.config as pexConfig
from lsst.daf.base import PropertyList
from lsst.pipe.base import connectionTypes
from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration
from lsst.ts.wep.WfEstimator import WfEstimator
from lsst.ts.wep.Utility import getConfigDir, DonutTemplateType, DefocalType
from lsst.ts.wep.cwfs.DonutTemplateFactory import DonutTemplateFactory
from lsst.ts.wep.task.CombineZernikesSigmaClipTask import CombineZernikesSigmaClipTask
from scipy.signal import correlate
from scipy.ndimage import rotate
from lsst.ts.wep.task.DonutStamps import DonutStamp, DonutStamps
class EstimateZernikesBaseConnections(
pipeBase.PipelineTaskConnections, dimensions=("detector", "instrument")
):
donutCatalog = connectionTypes.Input(
doc="Donut Locations",
dimensions=(
"visit",
"detector",
"instrument",
),
storageClass="DataFrame",
name="donutCatalog",
multiple=True,
)
camera = connectionTypes.PrerequisiteInput(
name="camera",
storageClass="Camera",
doc="Input camera to construct complete exposures.",
dimensions=["instrument"],
isCalibration=True,
lookupFunction=lookupStaticCalibration,
)
donutStampsExtra = connectionTypes.Output(
doc="Extra-focal Donut Postage Stamp Images",
dimensions=("visit", "detector", "instrument"),
storageClass="StampsBase",
name="donutStampsExtra",
multiple=True,
)
donutStampsIntra = connectionTypes.Output(
doc="Intra-focal Donut Postage Stamp Images",
dimensions=("visit", "detector", "instrument"),
storageClass="StampsBase",
name="donutStampsIntra",
multiple=True,
)
outputZernikesRaw = connectionTypes.Output(
doc="Zernike Coefficients from all donuts",
dimensions=("visit", "detector", "instrument"),
storageClass="NumpyArray",
name="zernikeEstimateRaw",
multiple=True,
)
outputZernikesAvg = connectionTypes.Output(
doc="Zernike Coefficients averaged over donuts",
dimensions=("visit", "detector", "instrument"),
storageClass="NumpyArray",
name="zernikeEstimateAvg",
multiple=True,
)
class EstimateZernikesBaseConfig(
pipeBase.PipelineTaskConfig, pipelineConnections=EstimateZernikesBaseConnections
):
# Config setting for pipeline task with defaults
donutTemplateSize = pexConfig.Field(
doc="Size of Template in pixels", dtype=int, default=160
)
donutStampSize = pexConfig.Field(
doc="Size of donut stamps in pixels", dtype=int, default=160
)
initialCutoutPadding = pexConfig.Field(
doc=str(
"Additional padding in pixels on each side of initial "
+ "postage stamp of donutStampSize "
+ "to make sure we have a stamp of donutStampSize after recentroiding donut"
),
dtype=int,
default=40,
)
combineZernikes = pexConfig.ConfigurableField(
target=CombineZernikesSigmaClipTask,
doc=str(
"Choice of task to combine the Zernikes from pairs of "
+ "donuts into a single value for the detector. (The default "
+ "is CombineZernikesSigmaClipTask.)"
),
)
class EstimateZernikesBaseTask(pipeBase.PipelineTask):
"""
Base class for Zernike estimation tasks.
Subclasses must implement _DefaultName.
"""
ConfigClass = EstimateZernikesBaseConfig
# _DefaultName implemented here in subclass
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Set size (in pixels) of donut template image used for
# final centroiding by convolution of initial cutout with template
self.donutTemplateSize = self.config.donutTemplateSize
# Set final size (in pixels) of postage stamp images returned as
# DonutStamp objects
self.donutStampSize = self.config.donutStampSize
# Add this many pixels onto each side of initial
# cutout stamp beyond the size specified
# in self.donutStampSize. This makes sure that
# after recentroiding the donut from the catalog
# position by convolving a template on the initial
# cutout stamp we will still have a postage stamp
# of size self.donutStampSize.
self.initialCutoutPadding = self.config.initialCutoutPadding
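        # For the defaults (donutStampSize=160, initialCutoutPadding=40)
        # the initial cutout is 240x240 pixels, so the recentroided donut
        # can shift by up to 40 pixels in each direction and still leave
        # a full 160x160 final stamp inside the initial cutout.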
# Choice of task to combine the Zernike coefficients
# from individual pairs of donuts into a single array
# for the detector.
self.combineZernikes = self.config.combineZernikes
self.makeSubtask("combineZernikes")
def getTemplate(self, detectorName, defocalType):
"""
Get the templates for the detector.
Parameters
----------
detectorName: str
Name of the CCD (e.g. 'R22_S11').
defocalType: enum 'DefocalType'
Defocal type of the donut image.
Returns
-------
numpy.ndarray
Template donut for the detector and defocal type.
"""
templateMaker = DonutTemplateFactory.createDonutTemplate(
DonutTemplateType.Model
)
template = templateMaker.makeTemplate(
detectorName, defocalType, self.donutTemplateSize
)
return template
def shiftCenter(self, center, boundary, distance):
"""Shift the center if its distance to boundary is less than required.
Parameters
----------
center : float
Center point.
boundary : float
Boundary point.
distance : float
Required distance.
Returns
-------
float
Shifted center.
"""
# Distance between the center and boundary
delta = boundary - center
# Shift the center if needed
if abs(delta) < distance:
return boundary - np.sign(delta) * distance
else:
return center
def calculateFinalCentroid(self, exposure, template, xCent, yCent):
"""
Recentroid donut from catalog values by convolving with template.
Also return the appropriate corner values for the final donutStamp
taking into account donut possibly being near the edges of the
exposure and compensating appropriately.
Parameters
----------
exposure: lsst.afw.image.Exposure
Exposure with the donut image.
        template: numpy.ndarray
Donut template for the exposure.
xCent: int
X pixel donut center from donutCatalog.
yCent: int
Y pixel donut center from donutCatalog.
Returns
-------
int
Final donut x centroid pixel position on exposure.
int
Final donut y centroid pixel position on exposure.
int
Final x corner position on exposure for donutStamp BBox.
int
Final y corner position on exposure for donutStamp BBox.
"""
expDim = exposure.getDimensions()
initialCutoutSize = self.donutStampSize + (2 * self.initialCutoutPadding)
initialHalfWidth = int(initialCutoutSize / 2)
stampHalfWidth = int(self.donutStampSize / 2)
# Shift stamp center if necessary
xCent = self.shiftCenter(xCent, expDim.getX(), initialHalfWidth)
xCent = self.shiftCenter(xCent, 0, initialHalfWidth)
yCent = self.shiftCenter(yCent, expDim.getY(), initialHalfWidth)
yCent = self.shiftCenter(yCent, 0, initialHalfWidth)
# Stamp BBox defined by corner pixel and extent
initXCorner = xCent - initialHalfWidth
initYCorner = yCent - initialHalfWidth
# Define BBox and get cutout from exposure
initCornerPoint = lsst.geom.Point2I(initXCorner, initYCorner)
initBBox = lsst.geom.Box2I(
initCornerPoint, lsst.geom.Extent2I(initialCutoutSize)
)
initialCutout = exposure[initBBox]
# Find the centroid by finding the max point in an initial
# cutout convolved with a template
correlatedImage = correlate(initialCutout.image.array, template)
maxIdx = np.argmax(correlatedImage)
maxLoc = np.unravel_index(maxIdx, np.shape(correlatedImage))
# The actual donut location is at the center of the template
# But the peak of correlation will correspond to the [0, 0]
# corner of the template
templateHalfWidth = int(self.donutTemplateSize / 2)
newX = maxLoc[1] - templateHalfWidth
newY = maxLoc[0] - templateHalfWidth
finalDonutX = xCent + (newX - initialHalfWidth)
finalDonutY = yCent + (newY - initialHalfWidth)
# Shift stamp center if necessary but not final centroid definition
xStampCent = self.shiftCenter(finalDonutX, expDim.getX(), stampHalfWidth)
xStampCent = self.shiftCenter(xStampCent, 0, stampHalfWidth)
yStampCent = self.shiftCenter(finalDonutY, expDim.getY(), stampHalfWidth)
yStampCent = self.shiftCenter(yStampCent, 0, stampHalfWidth)
# Define corner for final stamp BBox
xCorner = xStampCent - stampHalfWidth
yCorner = yStampCent - stampHalfWidth
return finalDonutX, finalDonutY, xCorner, yCorner
def cutOutStamps(self, exposure, donutCatalog, defocalType, cameraName):
"""
Cut out postage stamps for sources in catalog.
Parameters
----------
exposure: lsst.afw.image.Exposure
Post-ISR image with defocal donuts sources.
donutCatalog: pandas DataFrame
Source catalog for the pointing.
defocalType: enum 'DefocalType'
Defocal type of the donut image.
cameraName: str
Name of camera for the exposure. Can accept "LSSTCam"
or "LSSTComCam".
Returns
-------
DonutStamps
Collection of postage stamps as
lsst.afw.image.maskedImage.MaskedImage with additional metadata.
"""
detectorName = exposure.getDetector().getName()
template = self.getTemplate(detectorName, defocalType)
# Final list of DonutStamp objects
finalStamps = []
# Final locations of donut centroids in pixels
finalXCentList = []
finalYCentList = []
# Final locations of BBox corners for DonutStamp images
xCornerList = []
yCornerList = []
for donutRow in donutCatalog.to_records():
# Make an initial cutout larger than the actual final stamp
# so that we can centroid to get the stamp centered exactly
# on the donut
xCent = int(donutRow["centroid_x"])
yCent = int(donutRow["centroid_y"])
# Adjust the centroid coordinates from the catalog by convolving
# the postage stamp with the donut template and return
# the new centroid position as well as the corners of the
# postage stamp to cut out of the exposure.
finalDonutX, finalDonutY, xCorner, yCorner = self.calculateFinalCentroid(
exposure, template, xCent, yCent
)
finalXCentList.append(finalDonutX)
finalYCentList.append(finalDonutY)
# Get the final cutout
finalCorner = lsst.geom.Point2I(xCorner, yCorner)
finalBBox = lsst.geom.Box2I(
finalCorner, lsst.geom.Extent2I(self.donutStampSize)
)
xCornerList.append(xCorner)
yCornerList.append(yCorner)
finalCutout = exposure[finalBBox]
# Save MaskedImage to stamp
finalStamp = finalCutout.getMaskedImage()
finalStamps.append(
DonutStamp(
stamp_im=finalStamp,
sky_position=lsst.geom.SpherePoint(
donutRow["coord_ra"],
donutRow["coord_dec"],
lsst.geom.radians,
),
centroid_position=lsst.geom.Point2D(finalDonutX, finalDonutY),
detector_name=detectorName,
cam_name=cameraName,
defocal_type=defocalType.value,
)
)
catalogLength = len(donutCatalog)
stampsMetadata = PropertyList()
stampsMetadata["RA_DEG"] = np.degrees(donutCatalog["coord_ra"].values)
stampsMetadata["DEC_DEG"] = np.degrees(donutCatalog["coord_dec"].values)
stampsMetadata["DET_NAME"] = np.array([detectorName] * catalogLength, dtype=str)
stampsMetadata["CAM_NAME"] = np.array([cameraName] * catalogLength, dtype=str)
stampsMetadata["DFC_TYPE"] = np.array([defocalType.value] * catalogLength)
# Save the centroid values
stampsMetadata["CENT_X"] = np.array(finalXCentList)
stampsMetadata["CENT_Y"] = np.array(finalYCentList)
# Save the corner values
stampsMetadata["X0"] = np.array(xCornerList)
stampsMetadata["Y0"] = np.array(yCornerList)
return DonutStamps(finalStamps, metadata=stampsMetadata)
def estimateZernikes(self, donutStampsExtra, donutStampsIntra):
"""
Take the donut postage stamps and estimate the Zernike coefficients.
Parameters
----------
donutStampsExtra: DonutStamps
Extra-focal donut postage stamps.
donutStampsIntra: DonutStamps
Intra-focal donut postage stamps.
Returns
-------
numpy.ndarray
            Zernike coefficients for the exposure. One set of coefficients
            is returned per donut pair rather than per detector, so this is
            a 2-D numpy array with one row per donut pair and one column
            per Zernike coefficient.
"""
zerArray = []
configDir = getConfigDir()
instDir = os.path.join(configDir, "cwfs", "instData")
algoDir = os.path.join(configDir, "cwfs", "algo")
wfEsti = WfEstimator(instDir, algoDir)
wfEsti.config(sizeInPix=self.donutStampSize)
for donutExtra, donutIntra in zip(donutStampsExtra, donutStampsIntra):
fieldXYExtra = donutExtra.calcFieldXY()
fieldXYIntra = donutIntra.calcFieldXY()
camera = donutExtra.getCamera()
detectorExtra = camera.get(donutExtra.detector_name)
detectorIntra = camera.get(donutIntra.detector_name)
eulerZExtra = detectorExtra.getOrientation().getYaw().asDegrees()
eulerZIntra = detectorIntra.getOrientation().getYaw().asDegrees()
# NOTE: TS_WEP expects these images to be transposed
# TODO: Look into this
wfEsti.setImg(
fieldXYExtra,
DefocalType.Extra,
image=rotate(donutExtra.stamp_im.getImage().getArray(), eulerZExtra).T,
)
wfEsti.setImg(
fieldXYIntra,
DefocalType.Intra,
image=rotate(donutIntra.stamp_im.getImage().getArray(), eulerZIntra).T,
)
wfEsti.reset()
zer4UpNm = wfEsti.calWfsErr()
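            # Convert the wavefront error from nanometers to microns
            # (1 nm = 1e-3 micron)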
zer4UpMicrons = zer4UpNm * 1e-3
zerArray.append(zer4UpMicrons)
return np.array(zerArray)
def getCombinedZernikes(self, zernikeArray):
"""
        Combine the Zernike coefficients from the donut pairs on the
        CCD into a single set of coefficients for the CCD.
Parameters
----------
        zernikeArray: numpy.ndarray
            The full set of Zernike coefficients for each pair
of donuts on the CCD. Each row of the array should
be the set of Zernike coefficients for a single
donut pair.
Returns
-------
struct : `lsst.pipe.base.Struct`
The struct contains the following data:
- combinedZernikes : numpy.ndarray
The final combined Zernike coefficients from the CCD.
            - combineFlags : numpy.ndarray
                Flags indicating whether each set of Zernike
                coefficients was used in the final estimate.
                A value of 0 means the coefficients from that row
                in the input `zernikeArray` were used;
                a value of 1 means they were not used.
"""
return self.combineZernikes.run(zernikeArray)
|
lsst-ts/ts_wep
|
python/lsst/ts/wep/task/EstimateZernikesBase.py
|
Python
|
gpl-3.0
| 17,761
|
[
"VisIt"
] |
c1271716797bb66b13cef987adf166696ba33443548da3fb225ddc1e1d8e7523
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
"""Check consistency on dataset iris."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
"""Check consistency on dataset boston house prices."""
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
"""Regression models should not have a classes_ attribute."""
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
"""Predict probabilities."""
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importance(name, X, y):
"""Check variable importances."""
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importance, name, X, y
def check_oob_score(name, X, y, n_estimators=20):
"""Check that oob prediction is a good estimation of the generalization
error."""
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
"""Check that base trees can be grid-searched."""
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
"""Check pickability."""
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
"""Check estimators on multi-output problems."""
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
"""Test that n_classes_ and classes_ have proper shape."""
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning a dense array.
'''
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning the same array for both argument
values.
'''
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
"""Test precedence of max_leaf_nodes over max_depth. """
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
"""Test if leaves contain more than leaf_count training examples"""
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_memory_layout(name, dtype):
"""Check that it works no matter the memory layout"""
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_warm_start(name, random_state=42):
"""Test if fitting incrementally with warm start gives a forest of the
right size and the same results as a normal fit."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
"""Test if fit clears state and grows a new forest when warm_start==False.
"""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
"""Test if warm start second fit with smaller n_estimators raises error."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
"""Test if warm start with equal n_estimators does nothing and returns the
same forest and raises a warning."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
"""Test that the warm start computes oob score when asked."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
if __name__ == "__main__":
import nose
nose.runmodule()
|
Garrett-R/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
Python
|
bsd-3-clause
| 28,031
|
[
"Brian"
] |
9c89ce3a5a948020bd7f998038057a440878fb0c3747cb925782ea8db3dfc46c
|
#!/usr/bin/env python
#
# ObsPlan.py
#
# Elisa Antolini
# Jeremy Heyl
# UBC Southern Observatory
#
# This script takes the LIGO-Virgo Skymap (P(d|m)) and optionally a
# galaxy-density map (P(m)) and finds the most likely fields to
# observe (P(m|d)). The fields are assumed to be healpix regions from a
# tesselation with a given value of nside (the value of nside
# depends of the field of view of the telescope).
#
# P(position|data) = P(position) P(data|position) / P(data)
#
# P(position) is the galaxy density map ( P(m) )
# P(data|position) is the skymap from LIGO-Virgo ( P(d|m) )
# P(data) is constant with position so we neglect it.
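#
#  In the code below this reduces to an element-wise product of the two
#  HEALPix maps, renormalized so that the pixel values sum to one.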
#
#
#
# usage: ObsPlan.py [-h] [--gal-map GAL_MAP] [--nvalues NVALUES]
# [--cumprob CUMPROB] [--savefigures] [--no-savefigures]
# [--textoutput] [--no-textoutput]
# sky-map nside
#
#
# nside = ceil ( sqrt (3/Pi) 60 / s )
#
# where s is the length of one side of the square field of view in degrees.
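#
#  For example, a 1-degree square field of view gives
#  nside = ceil( sqrt(3/Pi) * 60 / 1 ) = 59, which the script rounds to
#  the nearest power of two, nside = 64.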
#
#
# Questions: heyl@phas.ubc.ca
#
# Copyright 2015, Elisa Antolini and Jeremy Heyl
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from argparse import ArgumentParser
import math as mt
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
import sys
def IndexToDeclRa(NSIDE,index):
theta,phi=hp.pixelfunc.pix2ang(NSIDE,index)
return np.degrees(mt.pi/2.-theta),np.degrees(phi)
def DeclRaToIndex(decl,RA,NSIDE):
return hp.pixelfunc.ang2pix(NSIDE,np.radians(90.-decl),np.radians(RA))
def PlotMap(Map,NsideMap,MapName):
hp.mollview(Map,coord='C',rot = [0,0.3], title='Histogram-Equalized Probability Density Map', unit='prob', xsize=NsideMap)
hp.graticule()
plt.savefig(MapName)
def isPower(num, base):
if base == 1 and num != 1: return False
if base == 1 and num == 1: return True
if base == 0 and num != 1: return False
power = int (mt.log (num, base) + 0.5)
return base ** power == num
def MakeObsPlan(SkyMap_name,nside,SaveFigures,nvalues=None,
cumprob=None,DensityMap_name=None,
TextOutput=False):
#Check if the nside is a power of two
val = isPower(nside,2)
if val == False:
print(" **************** WARNING **************** ")
print("The inserted NSIDE is not a power of two")
y = np.log2(nside)
exp = int(y)
if (exp + 0.5) < y :
exp = exp +1
nside = int(np.power(2,exp))
print("The nearest NSIDE applicable is "+str(nside))
print(" ****************************************** ")
nside_DensityMap = 0
if DensityMap_name != None :
        #Load the Galaxy Density Map P(m)
Densitymap_Ring = hp.read_map(DensityMap_name,0)
nside_DensityMap = hp.pixelfunc.get_nside(Densitymap_Ring)
galpixels_DensityMap = np.asarray(Densitymap_Ring)
if SaveFigures :
PlotMap(galpixels_DensityMap,nside_DensityMap,'./GalaxyDensityMap.png')
#Load the Sky Map from LIGO-Virgo ( P(d|m) )
Skymap_Ring = hp.read_map(SkyMap_name,0)
nside_SkyMap = hp.pixelfunc.get_nside(Skymap_Ring)
galpixels_SkyMap = np.asarray(Skymap_Ring)
if SaveFigures:
PlotMap(galpixels_SkyMap,nside_SkyMap,'./LIGOSkyMap.png')
#Resize the Sky Map if necessary
if nside_SkyMap != nside:
galpixels_SkyMap = hp.pixelfunc.ud_grade(galpixels_SkyMap,nside_out = nside, order_in = 'RING', order_out = 'RING')
if SaveFigures:
PlotMap(galpixels_SkyMap,nside,'./LIGOSkyMapResized.png')
#Resize Galaxy Density Map if necessary
if DensityMap_name != None :
if nside_DensityMap != nside:
galpixels_DensityMap = hp.pixelfunc.ud_grade(galpixels_DensityMap,nside_out = nside, order_in = 'RING', order_out = 'RING')
galpixels_DensityMap = np.where(galpixels_DensityMap>0,galpixels_DensityMap,0)
if SaveFigures:
PlotMap(galpixels_DensityMap,nside,'./GalaxyDensityMapResized.png')
Map_Position_Data = np.zeros(hp.nside2npix(nside))
# Multiply the resulting maps together ->
# P(position|data) = P(position) P(data|position)
if DensityMap_name != None :
Map_Position_Data = galpixels_SkyMap * galpixels_DensityMap
else :
Map_Position_Data = galpixels_SkyMap
# Normalize to 1 the sum of the pixels
Map_Position_Data/=np.sum(Map_Position_Data)
if SaveFigures:
PlotMap(Map_Position_Data,nside,'./MapPositionData.png')
# Sort the array by the probability
# Sort from the largest to the smallest
healpixno=np.argsort(-Map_Position_Data)
Map_Position_Data=Map_Position_Data[healpixno]
# accumulate the probability
probsum=np.cumsum(Map_Position_Data)
dec, ra = IndexToDeclRa(nside,healpixno)
if TextOutput:
np.savetxt("SkyMap_OutFile.txt.gz",
np.transpose([healpixno,ra,dec,
Map_Position_Data,probsum,
np.arange(1,len(probsum)+1)]),
fmt="%16d %10.5f %10.5f %10.5f %10.5f %16d",
header="Healpix Number| RA| Dec|Probability|Cumulative Prob | Number of Fields")
else:
np.savez("SkyMap_OutFile",
healpixno=healpixno,ra=ra,dec=dec,
prob=Map_Position_Data,probsum=probsum)
if nvalues != None:
print("# %d most probable values :" % nvalues)
ii=np.arange(nvalues)
np.savetxt(sys.stdout,
np.transpose([healpixno[ii],ra[ii],dec[ii],
Map_Position_Data[ii],probsum[ii],ii+1]),
fmt="%16d %10.5f %10.5f %10.5f %10.5f %16d",
header="Healpix Number| RA| Dec|Probability|Cumulative Prob | Number of Fields")
if cumprob != None:
print("# Most probable values with cumprob < %g" % cumprob)
ii=(probsum<cumprob)
hpn=healpixno[ii]
np.savetxt(sys.stdout,
np.transpose([hpn,ra[ii],dec[ii],
Map_Position_Data[ii],probsum[ii],
np.arange(1,len(hpn)+1)]),
fmt="%16d %10.5f %10.5f %10.5f %10.5f %16d",
header="Healpix Number| RA| Dec|Probability|Cumulative Prob | Number of Fields")
def _parse_command_line_arguments():
"""
Parse and return command line arguments
"""
parser = ArgumentParser(
description=(
'Command-line tool to generate an observing plan from a LIGO/Virgo probability map (with an optional galaxy map too)'
),
)
parser.add_argument(
'sky-map',
type=str,
help=(
'A FITS file containing the LIGO/Virgo probability map in HEALPIX format'
),
)
parser.add_argument(
'nside',
type=int,
help=(
            'nside for the output map: '
            'nside = ceil(sqrt(3/Pi) 60 / s), '
            'where s is the length of one side of the square field of view '
            'in degrees. It will be rounded to the nearest power of two.'
),
)
parser.add_argument(
'--gal-map',
required=False,
type=str,
help='A FITS file containing the galaxy density map in HEALPIX format'
)
parser.add_argument(
'--nvalues',
required=False,
type=int,
help='Number of Maximum Probability pixels to be shown'
)
parser.add_argument(
'--cumprob',
required=False,
type=float,
help='Output up to the given cumulative probability'
)
parser.add_argument('--savefigures',dest='savefigures',action='store_true')
parser.add_argument('--no-savefigures',dest='savefigures',action='store_false')
parser.set_defaults(savefigures=False)
parser.add_argument('--textoutput',dest='textoutput',action='store_true')
parser.add_argument('--no-textoutput',dest='textoutput',action='store_false')
parser.set_defaults(textoutput=False)
arguments = vars(parser.parse_args())
return arguments
#------------------------------------------------------------------------------
# main
#
def _main():
"""
This is the main routine.
"""
args=_parse_command_line_arguments()
MakeObsPlan(args['sky-map'],args['nside'],args['savefigures'],
nvalues=args['nvalues'],cumprob=args['cumprob'],
DensityMap_name=args['gal_map'],TextOutput=args['textoutput'])
'''
#### Input Parameters #####
DensityMap_name = argv[1] # Density Map Name or none
SkyMap_name = argv[2] # Sky Map Name
nside = int(argv[3]) # NSIDE of probability Map
SaveFigures = argv[4] # Yes or No
nvalues = int(argv[5]) # Number of Maximum Probability pixels to be shown
MakeObsPlan(SkyMap_name,nside,SaveFigures,nvalues,DensityMap_name)
'''
#------------------------------------------------------------------------------
# Start program execution.
#
if __name__ == '__main__':
_main()
|
UBC-Astrophysics/ObsPlan
|
ObsPlan.py
|
Python
|
gpl-3.0
| 9,933
|
[
"Galaxy"
] |
9c777b036deb6286706064f973eeacb5febd031d590641e5aebd4450986a8c8d
|
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao import mf as mf_c
from pyscf.data.nist import HARTREE2EV
from pyscf.nao.m_overlap_ni import overlap_ni
class KnowValues(unittest.TestCase):
def test_0073_vna_vnl_N2(self):
""" Test the Ena energy and indirectly VNA matrix elements """
dname = os.path.dirname(os.path.abspath(__file__))
mf = mf_c(label='n2', cd=dname)
vna = mf.vna_coo(level=3).toarray()
rdm = mf.make_rdm1()[0,0,:,:,0]
Ena = HARTREE2EV*(-0.5)*(vna*rdm).sum()
self.assertAlmostEqual(Ena, 133.24212864149359)
# siesta: Ena = 133.196299
vnl = mf.vnl_coo().toarray()
Enl = HARTREE2EV*(vnl*rdm).sum()
self.assertAlmostEqual(Enl, -61.604522776730128)
#siesta: Enl = -61.601204
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0073_vna_vnl_n2.py
|
Python
|
apache-2.0
| 854
|
[
"PySCF",
"SIESTA"
] |
c679b1e99f8bdbc1584120f5bbfc642cfa981f3f403c0341629bbd6eeed808b2
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from setuptools.command.egg_info import egg_info
from setuptools.command.develop import develop
from setuptools.command.install import install
import os
from setuptools import find_packages, setup
__version__ = '2.0.1-dev'
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
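# A hypothetical invocation that builds from the Cython sources instead of
# the pre-generated C files:
#   USE_CYTHON=1 pip install .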
# bootstrap numpy install
# https://stackoverflow.com/questions/51546255/
# python-package-setup-setup-py-with-customisation
# -to-handle-wrapped-fortran
def custom_command():
import sys
if sys.platform in ['darwin', 'linux']:
os.system('pip install numpy')
class CustomInstallCommand(install):
def run(self):
install.run(self)
custom_command()
class CustomDevelopCommand(develop):
def run(self):
develop.run(self)
custom_command()
class CustomEggInfoCommand(egg_info):
def run(self):
egg_info.run(self)
custom_command()
extensions = [
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
classes = """
Development Status :: 1 - Planning
Intended Audience :: Science/Research
Natural Language :: English
Operating System :: MacOS :: MacOS X
Operating System :: POSIX
Operating System :: Unix
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = "Python implementation of the SourceTracker R package."
standalone = ['sourcetracker2=sourcetracker._cli.gibbs:gibbs']
q2cmds = ['q2-sourcetracker2=sourcetracker._q2.plugin_setup:plugin']
with open('README.md') as f:
long_description = f.read()
setup(
name='sourcetracker',
version=__version__,
license='modified BSD',
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
author='Biota Technology',
author_email='will@biota.com',
maintainer='Will Van Treuren',
maintainer_email='will@biota.com',
url='http://www.biota.com',
packages=find_packages(),
ext_modules=extensions,
install_requires=[
'numpy',
'click',
'pandas',
'scipy',
'nose',
'scikit-learn',
'scikit-bio',
'biom-format',
'h5py',
'seaborn'],
classifiers=classifiers,
package_data={'sourcetracker/_q2': ['citations.bib']},
entry_points={'qiime2.plugins': q2cmds,
'console_scripts': standalone},
cmdclass={'install': CustomInstallCommand,
'develop': CustomDevelopCommand,
'egg_info': CustomEggInfoCommand},
zip_safe=False)
|
biota/sourcetracker2
|
setup.py
|
Python
|
bsd-3-clause
| 3,163
|
[
"scikit-bio"
] |
60c2a6101a1950c159082c3eb4be6943f42550aaa6cbbd4457a7c92beb536ac5
|
"""rallpacks_cable_hhchannel.py:
A cable with 1000 compartments with HH-type channels in it.
Last modified: Wed May 21, 2014 09:51AM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import moose
from moose import utils
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import compartment as comp
EREST_ACT = -65e-3
per_ms = 1e3
dt = 5e-5
cable = []
def alphaM(A, B, V0, v):
'''Compute alpha_m at point v
    alpha_m = A(v - V0) / (exp((v-V0)/B) - 1)
'''
return (A*(v-V0) / (np.exp((v - V0)/B) -1 ))
def alphaN(A, B, V0, v):
'''Compute alpha_n at point v
    alpha_n = A(v-V0) / (exp((v-V0)/B) - 1)
'''
return alphaM(A, B, V0, v)
def betaM(A, B, V0, v):
'''Compute beta_m at point v
'''
return (A * np.exp((v-V0)/B))
def betaN(A, B, V0, v):
return betaM(A, B, V0, v)
def alphaH(A, B, V0, v):
'''Compute alpha_h at point v
'''
return (A * np.exp(( v - V0) / B))
def betaH(A, B, V0, v):
    '''Compute beta_h at point v
    beta_h = A / (exp((v-V0)/B) + 1)
    '''
    return (A / (np.exp((v - V0)/B) + 1))
def createChannel(species, path, **kwargs):
    """Create a channel of the given species ('na' or 'k') under `path`."""
    if species == 'na':
        return create_na_chan(parent=path, **kwargs)
    elif species == 'k':
        return create_k_chan(parent=path, **kwargs)
    else:
        utils.dump("FATAL", "Unsupported channel type: {}".format(species))
        raise RuntimeError("Unsupported species of channel")
def create_na_chan(parent='/library', name='na', vmin=-110e-3, vmax=50e-3, vdivs=3000):
"""Create a Hodhkin-Huxley Na channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
na = moose.HHChannel('%s/%s' % (parent, name))
na.Xpower = 3
na.Ypower = 1
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
m_alpha = per_ms * (25 - v * 1e3) / (10 * (np.exp((25 - v * 1e3) / 10) - 1))
m_beta = per_ms * 4 * np.exp(- v * 1e3/ 18)
m_gate = moose.element('%s/gateX' % (na.path))
m_gate.min = vmin
m_gate.max = vmax
m_gate.divs = vdivs
m_gate.tableA = m_alpha
m_gate.tableB = m_alpha + m_beta
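    # By MOOSE convention the gate tables hold alpha(V) in tableA and
    # alpha(V) + beta(V) in tableB, so the steady state is
    # m_inf = tableA / tableB and the time constant is tau_m = 1 / tableB.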
h_alpha = per_ms * 0.07 * np.exp(-v / 20e-3)
h_beta = per_ms * 1/(np.exp((30e-3 - v) / 10e-3) + 1)
h_gate = moose.element('%s/gateY' % (na.path))
h_gate.min = vmin
h_gate.max = vmax
h_gate.divs = vdivs
h_gate.tableA = h_alpha
h_gate.tableB = h_alpha + h_beta
return na
def create_k_chan(parent='/library', name='k', vmin=-120e-3, vmax=40e-3, vdivs=3000):
"""Create a Hodhkin-Huxley K channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
k = moose.HHChannel('%s/%s' % (parent, name))
k.Xpower = 4
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
n_alpha = per_ms * (10 - v * 1e3)/(100 * (np.exp((10 - v * 1e3)/10) - 1))
n_beta = per_ms * 0.125 * np.exp(- v * 1e3 / 80)
n_gate = moose.element('%s/gateX' % (k.path))
n_gate.min = vmin
n_gate.max = vmax
n_gate.divs = vdivs
n_gate.tableA = n_alpha
n_gate.tableB = n_alpha + n_beta
return k
def createHHComp(parent='/library', name='hhcomp', diameter=1e-6, length=1e-6):
"""Create a compartment with Hodgkin-Huxley type ion channels (Na and
K).
Returns a 3-tuple: (compartment, nachannel, kchannel)
"""
compPath = '{}/{}'.format(parent, name)
mc = comp.MooseCompartment( compPath, length, diameter, {})
c = mc.mc_
sarea = mc.surfaceArea
if moose.exists('/library/na'):
moose.copy('/library/na', c.path, 'na')
else:
create_na_chan(parent = c.path)
na = moose.element('%s/na' % (c.path))
# Na-conductance 120 mS/cm^2
na.Gbar = 120e-3 * sarea * 1e4
na.Ek = 115e-3 + EREST_ACT
moose.connect(c, 'channel', na, 'channel')
if moose.exists('/library/k'):
moose.copy('/library/k', c.path, 'k')
else:
create_k_chan(parent = c.path)
k = moose.element('%s/k' % (c.path))
# K-conductance 36 mS/cm^2
k.Gbar = 36e-3 * sarea * 1e4
k.Ek = -12e-3 + EREST_ACT
moose.connect(c, 'channel', k, 'channel')
return (c, na, k)
def makeCable(args):
global cable
ncomp = args['ncomp']
moose.Neutral('/cable')
for i in range( ncomp ):
compName = 'hhcomp{}'.format(i)
        hhComp = createHHComp( '/cable', compName )
cable.append( hhComp[0] )
# connect the cable.
for i, hhc in enumerate(cable[0:-1]):
hhc.connect('axial', cable[i+1], 'raxial')
def setupDUT( dt ):
global cable
comp = cable[0]
data = moose.Neutral('/data')
pg = moose.PulseGen('/data/pg')
pg.firstWidth = 25e-3
pg.firstLevel = 1e-10
moose.connect(pg, 'output', comp, 'injectMsg')
setupClocks( dt )
def setupClocks( dt ):
moose.setClock(0, dt)
moose.setClock(1, dt)
def setupSolver( hsolveDt ):
hsolvePath = '/hsolve'
hsolve = moose.HSolve( hsolvePath )
hsolve.dt = hsolveDt
hsolve.target = '/cable'
moose.useClock(1, hsolvePath, 'process')
def simulate( runTime, dt):
""" Simulate the cable """
moose.reinit()
setupSolver( hsolveDt = dt )
moose.start( runTime )
def main(args):
global cable
dt = args['dt']
makeCable(args)
setupDUT( dt )
t = time.time()
simulate( args['run_time'], dt )
print( 'Time to run %f seconds ' % ( time.time() - t ) )
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description = 'Rallpacks3: A cable with n compartment with HHChannel'
)
parser.add_argument( '--tau'
, default = 0.04
, type = float
, help = 'Time constant of membrane'
)
parser.add_argument( '--run_time'
, default = 0.25
, type = float
, help = 'Simulation run time'
)
parser.add_argument( '--dt'
, default = 5e-5
, type = float
, help = 'Step time during simulation'
)
parser.add_argument( '--Em'
, default = -65e-3
, type = float
, help = 'Resting potential of membrane'
)
parser.add_argument( '--RA'
, default = 1.0
, type = float
, help = 'Axial resistivity'
)
parser.add_argument( '--lambda'
, default = 1e-3
, type = float
        , help = 'Electrotonic length constant (lambda) of the cable'
)
parser.add_argument( '--x'
, default = 1e-3
, type = float
        , help = 'Position along the cable at which to record membrane potential'
)
parser.add_argument( '--length'
, default = 1e-3
, type = float
, help = 'Length of the cable'
)
parser.add_argument( '--diameter'
, default = 1e-6
, type = float
, help = 'Diameter of cable'
)
parser.add_argument( '--inj'
, default = 1e-10
, type = float
, help = 'Current injected at one end of the cable'
)
parser.add_argument( '--ncomp'
, default = 1000
, type = int
        , help = 'Number of compartments in the cable'
)
parser.add_argument( '--output'
, default = None
, type = str
, help = 'Store simulation results to this file'
)
args = parser.parse_args()
main( vars(args) )
|
subhacom/moose-core
|
tests/python/Rallpacks/rallpacks_cable_hhchannel.py
|
Python
|
gpl-3.0
| 7,784
|
[
"MOOSE"
] |
6b4aa82e4577a8e39dd675b9217e4b4b287b393178d6bc657ce6fe5c98dc4cc9
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import urllib2
from cachedhttpresponse import CachedHttpResponse
from cachebase import CacheBase
class CacheHttpHandler(urllib2.BaseHandler, CacheBase):
""" CacheHttpHandler class that can be used as an httphandler for
urllib2.build_opener.
"""
def __init__(self, cacheObject, logger=None):
""" Initialises the CacheHttpHandler and sets the correct cacheObject.
Arguments:
cacheObject : CacheObject - Must be a FileCache or MemoryCache object,
or object with similar signature.
Keyword Arguments:
logger : Logger - A logger that will be used to log. If not
set a default "print" we be done.
"""
# call the base class
CacheBase.__init__(self, logger)
self.__cacheObject = cacheObject
self.__cacheMarker = "X-local-cache"
return
def default_open(self, request):
"""Handles GET requests. It check the cache and if a valid one is present
returns that one if it is still valid. Is called before a request is
actually done.
Arguments:
        request : urllib2.Request - The request that needs to be served.
Returns a CachedHttpResponse if a cached item is found or None if none is
found.
"""
# self.__Log("=============================================")
url = request.get_full_url()
self.__Log("HTTP request for url: '%s'", url)
try:
(headerKey, bodyKey) = self._GetCacheKeys(url)
# self.__Log("======= %s", request.get_method())
# Only cache GET methods.
if not request.get_method() == "GET":
# let the next handler try to process the request
self.__Log("Not caching '%s' requests", request.get_method())
return None
# check if a cache response is available
if self.__cacheObject.HasKey(headerKey) and self.__cacheObject.HasKey(bodyKey):
# retrieve the values
headerValue = self.__cacheObject.Get(headerKey)
bodyValue = self.__cacheObject.Get(bodyKey)
# and construct a CachedHttpResponse
cachedResponse = CachedHttpResponse(url, headerValue, bodyValue)
self.__Log("Found a %s", cachedResponse)
if not self._IsExpired(self.__cacheObject, headerKey, cachedResponse):
# if the current request is not expired, it's a cache hit
self.__Log("Cache-Hit")
cachedResponse.SetCachFlag(self.__cacheMarker)
return cachedResponse
elif self._MustRevalidate(cachedResponse):
                    # Did the response specify must-revalidate? If so, the
                    # cached copy may still be usable after revalidation.
self.__Log("Stale-Cache hit found. Revalidating")
request.add_header("If-None-Match", cachedResponse.cacheParameters['etag'])
return None
else:
# the cache already expired, so it's not a cache hit.
self.__Log("Expired Cache-Hit")
return None
else:
self.__Log("No-Cache-Hit")
return None
except:
# in case of an error, always return None so the next handler gets the request
self.__Log("Error retrieving HTTP request from cache.", error=True, exc_info=True)
return None
def http_response(self, request, response):
""" Is called after a response is found.
Arguments:
request : urllib2.Request - The request that was done
response : urllib2.Response - The found response
Returns the response that was passed as input
"""
self.__Log("Processing HTTP response")
try:
if (request.get_method() == "GET" and self._IsCachableCode(response.code)):
# the request was a GET so we might need to cache
info = response.info()
# check if the response has the marker and is thus an already cached response.
if self.__cacheMarker in info:
self.__Log("This response came from the cache! No further processing needed.")
return response
url = request.get_full_url()
# retrieve the keys to store in the cache
(headerKey, bodyKey) = self._GetCacheKeys(url)
headerValue = response.info()
if response.code == 304:
bodyValue = self.__cacheObject.Get(bodyKey)
headerValue = self.__cacheObject.Get(headerKey)
response = CachedHttpResponse(url, headerValue, bodyValue)
self.__Log("304 Response found. Pro-Longing the %s", response)
# no need to continue, just return the value from the cache as
# it was still valid
return response
else:
bodyValue = response.read()
# create a new response object to return
response = CachedHttpResponse(url, headerValue, bodyValue)
self.__Log("Creating a %s", response)
if self._ShouldBeCached(response):
self.__Log("Cacheable response found, Caching request for url: '%s'", url)
# store both in the cache
self.__cacheObject.Set(headerKey, headerValue)
self.__cacheObject.Set(bodyKey, bodyValue)
else:
self.__Log("Not a GET request or not-cachable HTTP code (%s).", response.code)
except:
self.__Log("Error saving HTTP request into cache.", error=True, exc_info=True)
return CachedHttpResponse(url, headerValue, bodyValue, doProcessing=False)
return response
def __Log(self, message, *args, **kwargs):
""" Used to log a debug message. Message will be passed on to self._log
with classname added.
Arguments:
message : String - The message to log
*args : List[Object] - A list of arguments that will be used to
substitute parameters in the message.
Keyword Arguments:
**kwargs : Dictionary - List of additional keyword arguments. Possible
values are: "error = True"
"""
self._Log("CacheHttpHandler", message, *args, **kwargs)
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/cache/cachehttphandler.py
|
Python
|
gpl-2.0
| 7,433
|
[
"VisIt"
] |
90b7cd3d660e74bc59d4c47b4a2ec8455881ef1928d7bfd2e384851ee87541ea
|
#!/usr/bin/env python
import os, time, re
from functools import partial
from flask import Flask, Module, url_for, request, session, redirect, g, make_response, current_app, render_template
from decorators import login_required, guest_or_login_required, with_lock
from decorators import global_lock
# Make flask use the old session foo from <=flask-0.9
from flask_oldsessions import OldSecureCookieSessionInterface
from flask.ext.autoindex import AutoIndex
from sage.env import SAGE_SRC, SAGE_DOC
SRC = os.path.join(SAGE_SRC, 'sage')
from flask.ext.openid import OpenID
from flask.ext.babel import Babel, gettext, ngettext, lazy_gettext, get_locale
from sagenb.misc.misc import SAGENB_ROOT, DATA, translations_path, N_, nN_, unicode_str
from json import dumps
from sagenb.notebook.cell import number_of_rows
from sagenb.notebook.template import (css_escape, clean_name,
prettify_time_ago, TEMPLATE_PATH)
oid = OpenID()
class SageNBFlask(Flask):
static_path = ''
def __init__(self, *args, **kwds):
self.startup_token = kwds.pop('startup_token', None)
Flask.__init__(self, *args, **kwds)
self.session_interface = OldSecureCookieSessionInterface()
self.config['SESSION_COOKIE_HTTPONLY'] = False
self.root_path = SAGENB_ROOT
self.add_static_path('/css', os.path.join(DATA, "sage", "css"))
self.add_static_path('/images', os.path.join(DATA, "sage", "images"))
self.add_static_path('/javascript', DATA)
self.add_static_path('/static', DATA)
self.add_static_path('/java', DATA)
self.add_static_path('/java/jmol', os.path.join(os.environ["SAGE_ROOT"],"local","share","jmol"))
self.add_static_path('/jsmol', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol"))
self.add_static_path('/jsmol/js', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","js"))
self.add_static_path('/j2s', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","j2s"))
self.add_static_path('/jsmol/j2s', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","j2s"))
self.add_static_path('/j2s/core', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","j2s","core"))
import mimetypes
mimetypes.add_type('text/plain','.jmol')
#######
# Doc #
#######
#These "should" be in doc.py
DOC = os.path.join(SAGE_DOC, 'html', 'en')
self.add_static_path('/pdf', os.path.join(SAGE_DOC, 'pdf'))
self.add_static_path('/doc/static', DOC)
# Template globals
self.add_template_global(url_for)
# Template filters
self.add_template_filter(css_escape)
self.add_template_filter(number_of_rows)
self.add_template_filter(clean_name)
self.add_template_filter(prettify_time_ago)
self.add_template_filter(max)
self.add_template_filter(lambda x: repr(unicode_str(x))[1:],
name='repr_str')
self.add_template_filter(dumps, 'tojson')
def static_view_func(self, root_path, filename):
from flask.helpers import send_from_directory
return send_from_directory(root_path, filename)
def add_static_path(self, base_url, root_path):
self.add_url_rule(base_url + '/<path:filename>',
endpoint='/static'+base_url,
view_func=partial(self.static_view_func, root_path))
    def message(self, msg, cont='/', username=None, **kwds):
        """Returns an error message to the user."""
        from sagenb.misc.misc import SAGE_VERSION
        template_dict = {'msg': msg, 'cont': cont, 'username': username, 'sage_version': SAGE_VERSION}
template_dict.update(kwds)
return render_template(os.path.join('html', 'error_message.html'),
**template_dict)
base = Module('sagenb.flask_version.base')
#############
# Main Page #
#############
@base.route('/')
def index():
if 'username' in session:
# If there is a next request use that. See issue #76
if 'next' in request.args:
response = redirect(request.values.get('next', ''))
return response
response = redirect(url_for('worksheet_listing.home', username=session['username']))
if 'remember' in request.args:
response.set_cookie('nb_session_%s'%g.notebook.port,
expires=(time.time() + 60 * 60 * 24 * 14))
else:
response.set_cookie('nb_session_%s'%g.notebook.port)
response.set_cookie('cookie_test_%s'%g.notebook.port, expires=1)
return response
from authentication import login
if current_app.startup_token is not None and 'startup_token' in request.args:
if request.args['startup_token'] == current_app.startup_token:
g.username = session['username'] = 'admin'
session.modified = True
current_app.startup_token = None
return index()
return login()
######################
# Dynamic Javascript #
######################
from hashlib import sha1
@base.route('/javascript/dynamic/notebook_dynamic.js')
def dynamic_js():
from sagenb.notebook.js import javascript
# the javascript() function is cached, so there shouldn't be a big slowdown calling it
data,datahash = javascript()
if request.environ.get('HTTP_IF_NONE_MATCH', None) == datahash:
response = make_response('',304)
else:
response = make_response(data)
response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
response.headers['Etag']=datahash
return response
_localization_cache = {}
@base.route('/javascript/dynamic/localization.js')
def localization_js():
global _localization_cache
locale=repr(get_locale())
if _localization_cache.get(locale,None) is None:
data = render_template(os.path.join('js/localization.js'), N_=N_, nN_=nN_)
_localization_cache[locale] = (data, sha1(repr(data)).hexdigest())
data,datahash = _localization_cache[locale]
if request.environ.get('HTTP_IF_NONE_MATCH', None) == datahash:
response = make_response('',304)
else:
response = make_response(data)
response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
response.headers['Etag']=datahash
return response
_mathjax_js_cache = None
@base.route('/javascript/dynamic/mathjax_sage.js')
def mathjax_js():
global _mathjax_js_cache
if _mathjax_js_cache is None:
from sagenb.misc.misc import mathjax_macros
data = render_template('js/mathjax_sage.js', theme_mathjax_macros=mathjax_macros)
_mathjax_js_cache = (data, sha1(repr(data)).hexdigest())
data,datahash = _mathjax_js_cache
if request.environ.get('HTTP_IF_NONE_MATCH', None) == datahash:
response = make_response('',304)
else:
response = make_response(data)
response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
response.headers['Etag']=datahash
return response
@base.route('/javascript/dynamic/keyboard/<browser_os>')
def keyboard_js(browser_os):
from sagenb.notebook.keyboards import get_keyboard
data = get_keyboard(browser_os)
datahash=sha1(data).hexdigest()
if request.environ.get('HTTP_IF_NONE_MATCH', None) == datahash:
response = make_response('',304)
else:
response = make_response(data)
response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
response.headers['Etag']=datahash
return response
###############
# Dynamic CSS #
###############
@base.route('/css/main.css')
def main_css():
from sagenb.notebook.css import css
data,datahash = css()
if request.environ.get('HTTP_IF_NONE_MATCH', None) == datahash:
response = make_response('',304)
else:
response = make_response(data)
response.headers['Content-Type'] = 'text/css; charset=utf-8'
response.headers['Etag']=datahash
return response
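# The dynamic JS/CSS handlers above repeat the same ETag/304 branch. A
# hypothetical helper (not part of the original code) capturing that
# pattern; each route could delegate to it instead of inlining the check:
def _etag_response(data, datahash, content_type):
    """Return a 304 response on an ETag match, else the full payload."""
    if request.environ.get('HTTP_IF_NONE_MATCH', None) == datahash:
        response = make_response('', 304)
    else:
        response = make_response(data)
    response.headers['Content-Type'] = content_type
    response.headers['Etag'] = datahash
    return response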
########
# Help #
########
@base.route('/help')
@login_required
def help():
from sagenb.notebook.tutorial import notebook_help
from sagenb.misc.misc import SAGE_VERSION
return render_template(os.path.join('html', 'docs.html'), username = g.username, notebook_help = notebook_help, sage_version=SAGE_VERSION)
###########
# History #
###########
@base.route('/history')
@login_required
def history():
return render_template(os.path.join('html', 'history.html'), username = g.username,
text = g.notebook.user_history_text(g.username), actions = False)
@base.route('/live_history')
@login_required
def live_history():
W = g.notebook.create_new_worksheet_from_history(gettext('Log'), g.username, 100)
from worksheet import url_for_worksheet
return redirect(url_for_worksheet(W))
###########
# Favicon #
###########
@base.route('/favicon.ico')
def favicon():
from flask.helpers import send_file
return send_file(os.path.join(DATA, 'sage', 'images', 'favicon.ico'))
@base.route('/loginoid', methods=['POST', 'GET'])
@guest_or_login_required
@oid.loginhandler
def loginoid():
if not g.notebook.conf()['openid']:
return redirect(url_for('base.index'))
if g.username != 'guest':
return redirect(request.values.get('next', url_for('base.index')))
if request.method == 'POST':
openid = request.form.get('url')
if openid:
return oid.try_login(openid, ask_for=['email', 'fullname', 'nickname'])
return redirect(url_for('authentication.login'))
#render_template('html/login.html', next=oid.get_next_url(), error=oid.fetch_error())
@oid.after_login
@with_lock
def create_or_login(resp):
if not g.notebook.conf()['openid']:
return redirect(url_for('base.index'))
try:
username = g.notebook.user_manager().get_username_from_openid(resp.identity_url)
session['username'] = g.username = username
session.modified = True
except (KeyError, LookupError):
session['openid_response'] = resp
session.modified = True
return redirect(url_for('set_profiles'))
return redirect(request.values.get('next', url_for('base.index')))
@base.route('/openid_profiles', methods=['POST','GET'])
def set_profiles():
if not g.notebook.conf()['openid']:
return redirect(url_for('base.index'))
from sagenb.notebook.challenge import challenge
show_challenge=g.notebook.conf()['challenge']
if show_challenge:
chal = challenge(g.notebook.conf(),
is_secure = g.notebook.secure,
remote_ip = request.environ['REMOTE_ADDR'])
if request.method == 'GET':
if 'openid_response' in session:
from sagenb.notebook.misc import valid_username_chars
re_invalid_username_chars = re.compile('[^(%s)]' % valid_username_chars)
openid_resp = session['openid_response']
if openid_resp.fullname is not None:
openid_resp.fullname = re.sub(re_invalid_username_chars, '_', openid_resp.fullname)
template_dict={}
if show_challenge:
template_dict['challenge_html'] = chal.html()
return render_template('html/accounts/openid_profile.html', resp=openid_resp,
challenge=show_challenge, **template_dict)
else:
return redirect(url_for('base.index'))
if request.method == 'POST':
if 'openid_response' in session:
parse_dict = {'resp':session['openid_response']}
else:
return redirect(url_for('base.index'))
try:
resp = session['openid_response']
username = request.form.get('username')
from sagenb.notebook.user import User
from sagenb.notebook.misc import is_valid_username, is_valid_email
if show_challenge:
parse_dict['challenge'] = True
status = chal.is_valid_response(req_args = request.values)
if status.is_valid is True:
pass
elif status.is_valid is False:
err_code = status.error_code
if err_code:
parse_dict['challenge_html'] = chal.html(error_code = err_code)
else:
parse_dict['challenge_invalid'] = True
raise ValueError("Invalid challenge")
else:
parse_dict['challenge_missing'] = True
raise ValueError("Missing challenge")
if not is_valid_username(username):
parse_dict['username_invalid'] = True
raise ValueError("Invalid username")
if g.notebook.user_manager().user_exists(username):
parse_dict['username_taken'] = True
raise ValueError("Pre-existing username")
if not is_valid_email(request.form.get('email')):
parse_dict['email_invalid'] = True
raise ValueError("Invalid email")
try:
new_user = User(username, '', email = resp.email, account_type='user')
g.notebook.user_manager().add_user_object(new_user)
except ValueError as msg:
parse_dict['creation_error'] = True
raise ValueError("Error in creating user\n%s"%msg)
g.notebook.user_manager().create_new_openid(resp.identity_url, username)
session['username'] = g.username = username
session.modified = True
except ValueError:
return render_template('html/accounts/openid_profile.html', **parse_dict)
return redirect(url_for('base.index'))
#############
# OLD STUFF #
#############
############################
# Notebook autosave.
############################
# save if make a change to notebook and at least some seconds have elapsed since last save.
def init_updates():
global save_interval, idle_interval, last_save_time, last_idle_time
from sagenb.misc.misc import walltime
save_interval = notebook.conf()['save_interval']
idle_interval = notebook.conf()['idle_check_interval']
last_save_time = walltime()
last_idle_time = walltime()
def notebook_save_check():
global last_save_time
from sagenb.misc.misc import walltime
t = walltime()
if t > last_save_time + save_interval:
with global_lock:
# if someone got the lock before we did, they might have saved,
# so we check against the last_save_time again
# we don't put the global_lock around the outer loop since we don't need
# it unless we are actually thinking about saving.
if t > last_save_time + save_interval:
notebook.save()
last_save_time = t
def notebook_idle_check():
global last_idle_time
from sagenb.misc.misc import walltime
t = walltime()
if t > last_idle_time + idle_interval:
with global_lock:
# if someone got the lock before we did, they might have already idled,
# so we check against the last_idle_time again
# we don't put the global_lock around the outer loop since we don't need
# it unless we are actually thinking about quitting worksheets
if t > last_idle_time + idle_interval:
notebook.update_worksheet_processes()
notebook.quit_idle_worksheet_processes()
last_idle_time = t
def notebook_updates():
notebook_save_check()
notebook_idle_check()
notebook = None
#CLEAN THIS UP!
def create_app(path_to_notebook, *args, **kwds):
"""
This is the main method to create a running notebook. This is
called from the process spawned in run_notebook.py
"""
global notebook
startup_token = kwds.pop('startup_token', None)
#############
# OLD STUFF #
#############
import sagenb.notebook.notebook as notebook
notebook.MATHJAX = True
notebook = notebook.load_notebook(path_to_notebook, *args, **kwds)
init_updates()
##############
# Create app #
##############
app = SageNBFlask('flask_version', startup_token=startup_token,
template_folder=TEMPLATE_PATH)
app.secret_key = os.urandom(24)
oid.init_app(app)
app.debug = True
@app.before_request
def set_notebook_object():
g.notebook = notebook
####################################
# create Babel translation manager #
####################################
babel = Babel(app, default_locale='en_US')
#Check if saved default language exists. If not fallback to default
@app.before_first_request
def check_default_lang():
def_lang = notebook.conf()['default_language']
trans_ids = [str(trans) for trans in babel.list_translations()]
if def_lang not in trans_ids:
notebook.conf()['default_language'] = None
#register callback function for locale selection
#this function must be modified to add per user language support
@babel.localeselector
def get_locale():
return g.notebook.conf()['default_language']
########################
# Register the modules #
########################
app.register_blueprint(base)
from worksheet_listing import worksheet_listing
app.register_blueprint(worksheet_listing)
from admin import admin
app.register_blueprint(admin)
from authentication import authentication
app.register_blueprint(authentication)
from doc import doc
app.register_blueprint(doc)
from worksheet import ws as worksheet
app.register_blueprint(worksheet)
from settings import settings
app.register_blueprint(settings)
# Handles all uncaught exceptions by sending an e-mail to the
# administrator(s) and displaying an error page.
@app.errorhandler(Exception)
def log_exception(error):
from sagenb.notebook.notification import logger
logger.exception(error)
return app.message(
gettext('''500: Internal server error.'''),
username=getattr(g, 'username', 'guest')), 500
    #autoindex v0.3 doesn't seem to work with modules
#routing with app directly does the trick
#TODO: Check to see if autoindex 0.4 works with modules
idx = AutoIndex(app, browse_root=SRC, add_url_rules=False)
@app.route('/src/')
@app.route('/src/<path:path>')
@guest_or_login_required
def autoindex(path='.'):
filename = os.path.join(SRC, path)
if os.path.isfile(filename):
from cgi import escape
src = escape(open(filename).read().decode('utf-8','ignore'))
if (os.path.splitext(filename)[1] in
['.py','.c','.cc','.h','.hh','.pyx','.pxd']):
return render_template(os.path.join('html', 'source_code.html'),
src_filename=path,
src=src, username = g.username)
return src
return idx.render_autoindex(path)
return app
|
jdemeyer/sagenb
|
sagenb/flask_version/base.py
|
Python
|
gpl-3.0
| 19,263
|
[
"Jmol"
] |
7b840da72d6ba988049978e53cc9c84b935e9714fca14d619236ff20d2ee51e9
|
"""
Description:
Module to convert ase.db files into a catmap input file
<fname>.db: db file
Contains all adsorbate states on
surfaces and clean slabs for reference.
Mandatory key value pairs:
--------------------------
"name" : str
Value that identifies the catalyst composition.
"species" : str
adsorbate chemical formula.
'' should be the value for clean slabs.
        '-' should be inserted between separate fragments.
"energy" or "epot" : float
potential energy from DFT.
Recommended key value pairs:
----------------------------
"site" : str
name of adsorption site.
"phase" : str
Value that identifies the catalyst phase.
"facet" : str
Name of the facet, preferably in hkl notation, e.g. '(100)'.
Note that integer strings like '100' are not accepted.
"surf_lattice" : str
Name of the surface lattice geometry.
        E.g. HCP(001) and FCC(111) have "hexagonal" surface lattices.
"layers" : int
Number of atomic layers in slab.
"supercell" : str
Supercell size separated by 'x', e.g. '2x2'
"n": int
number of identical adsorbates.
Recommended keys in "data":
---------------------------
"BEEFvdW_contribs" : list
32 non-selfconsistent BEEF-vdW energies.
"frequencies" : list
vibrational frequencies.
Optional file dependencies:
---------------------------
<fname>.db : db file
Stores vibrational frequencies along with atomic structures
and energies.
"""
import warnings
import os
from uuid import uuid4
import numpy as np
import ase.db
from catmap import string2symbols
from ase.data import covalent_radii, atomic_numbers
from ase.calculators.singlepoint import SinglePointDFTCalculator
from catmap.api.bee import BEEFEnsemble as bee
try:
from tqdm import tqdm
except ImportError:
def tqdm(iterable):
return iterable
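# A minimal sketch (not part of the original module) of writing an ase.db
# row carrying the key-value pairs documented above; structure, file name
# and energy are hypothetical:
#
#     import ase.db
#     from ase.build import fcc111, add_adsorbate
#
#     slab = fcc111('Pt', size=(2, 2, 4))
#     add_adsorbate(slab, 'O', height=1.2, position='fcc')
#     con = ase.db.connect('example.db')
#     con.write(slab, name='Pt', species='O', site='fcc', phase='fcc',
#               facet='(111)', surf_lattice='hexagonal', supercell='2x2',
#               layers=4, n=1, epot=-123.45)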
class EnergyLandscape(object):
"""Class for converting raw data from ASE db to an energy txt output.
The class is made for treating atomic structures in the db as points on
or over a global potential energy surface.
"""
def __init__(self, beef_size=2000, beef_seed=0):
"""Initialize class."""
self.bee = bee(size=beef_size, seed=beef_seed)
self.epot = {}
self.freq = {}
self.ens = {}
self.de_dict = {}
self.dbid = {}
self.reference_epot = {}
self.reference_ens = {}
self.rxn_paths = {}
self.formation_energies = {}
self.std = {}
def get_molecules(self, fname, selection=[], frequency_db=None):
""" Method for importing molecules.
Parameters
----------
fname : str
path and filename of an ase database file containing molecules.
selection : list
ASE database filter strings.
frequency_db : str
path and filename of an ASE-db file containing atomic structures
and vibrational frequencies.
"""
[mol_epot,
mol_freq,
mol_ens,
mol_dbid] = self._db2mol(fname, selection=selection,
freq_path=frequency_db)
self.epot.update(mol_epot)
self.freq.update(mol_freq)
self.ens.update(mol_ens)
self.dbid.update(mol_dbid)
def get_surfaces(self, fname, selection=[], frequency_db=None,
site_specific=False):
""" Method for importing clean slabs and slabs with adsorbates.
Parameters
----------
fname : str
path and filename of an ase database file containing slabs.
selection : list
ASE database filter strings.
frequency_db : str
path and filename of an ASE-db file containing atomic structures
and vibrational frequencies.
site_specific : bool
flag for distinguishing sites or not, when importing adsorbates.
"""
# Select and import from database file. Return most stable states.
[surf_epot,
surf_freq,
surf_ens,
surf_dbid] = self._db2surf(fname, selection=selection,
freq_path=frequency_db,
site_specific=site_specific)
# Store data in dictionaries.
self.epot.update(surf_epot)
self.freq.update(surf_freq)
self.ens.update(surf_ens)
self.dbid.update(surf_dbid)
def get_transition_states(self, fname, selection=[], frequency_db=None,
site_specific=False):
""" Method for importing surface transition states.
Parameters
----------
fname : str
path and filename of an ase database file
containing reaction images.
selection : list
ASE database filter strings.
frequency_db : str
path and filename of an ASE-db file containing atomic structures
and vibrational frequencies.
site_specific : bool
flag for distinguishing sites or not, when importing adsorbates.
"""
# Select and import images from a database file.
rxn_paths = self._db2pes(fname, selection=selection,
site_specific=site_specific)
# Return lowest saddle points.
self.rxn_paths.update(rxn_paths)
[surf_epot,
surf_freq,
surf_ens,
surf_dbid] = self.pes2ts(freq_path=frequency_db)
# Store data in dictionaries.
self.epot.update(surf_epot)
self.freq.update(surf_freq)
self.ens.update(surf_ens)
self.dbid.update(surf_dbid)
def calc_formation_energies(self, references, beef=True):
""" Method for generating formation energies.
Parameters
----------
references : list of tuples of strings.
The first item in each tuple must be an atomic symbol, and the
second item in each tuple must be the self.epot dictionary key
of a reference gas phase species, <species name>_gas.
"""
# Get atomic references.
[self.atomic_e,
self.atomic_ens] = self._mol2ref(references=references)
# Get dictionaries with slab references and atomic references.
[self.reference_epot,
self.reference_ens] = self._get_refs()
        # Compute formation energies relative to the references.
self.formation_energies = self._get_formation_energies()
if beef:
self.de_dict, self.std = self._get_BEEstd()
def correction(self, key, correction):
"""Apply energy correction to a formation energy.
Parameters
----------
key : str
Key from self.formation_energies
correction : float
Energy correction to be added.
"""
self.formation_energies[key] += correction
def db_attach_reference_id(self, slab_db, ads_db, overwrite=True):
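        """Attach a slab_id key-value pair to each adsorbate row in ads_db,
        linking every adsorbate state to its clean-slab reference row.
        Note: ids are taken from self.dbid; the slab_db argument is unused.
        """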
slab_dict = self._slabs()
c_ads = ase.db.connect(ads_db)
for key in slab_dict:
slab_id = int(slab_dict[key]['id'])
for ads_id in slab_dict[key]['ads_ids']:
if overwrite:
c_ads.update(ads_id, slab_id=slab_id)
elif 'slab_id' not in c_ads.get(ads_id):
c_ads.update(ads_id, slab_id=slab_id)
def _slabs(self):
"""Return a dictionary constaining keys of slabs and dictionaries with
associated adsorbate keys.
Parameters
----------
"""
ads_slab_pairs = {}
missing_slab = []
for key in list(self.epot):
if 'gas' in key:
continue
n, species, cat, pha, lattice, fac, cell, site = key.split('_')
site_name = '_'.join(['0_', cat, pha, lattice, fac, cell,
'slab'])
if 'slab' not in key:
ads = species + '_' + site
dbid = self.dbid[key]
try:
slab_id = self.dbid[site_name]
except KeyError:
missing_slab.append(str(self.dbid[key]))
continue
if site_name not in ads_slab_pairs:
ads_slab_pairs.update({site_name: {'id': slab_id,
'species': [ads],
'ads_ids': [dbid]}})
else:
ads_slab_pairs[site_name]['species'].append(ads)
ads_slab_pairs[site_name]['ads_ids'].append(dbid)
if len(missing_slab) > 0:
print('Missing slabs: ' + ','.join(missing_slab))
return ads_slab_pairs
def _db2mol(self, fname, selection=[], freq_path=None):
""" Returns four dictionaries containing:
ab initio energies,
        frequencies,
non-selfconsistent BEEF perturbations,
database ids.
Parameters
----------
fname : str
path and filename of ase db file.
selection : list
ASE database filter strings.
freq_path : str
path and filename of an ASE-db file containing atomic structures
and vibrational frequencies.
File dependencies:
------------------
fname : ase-db file
Contains molecular reference states.
Mandatory key value pairs:
--------------------------
"energy" or "epot" : float
DFT calculated potential energy.
Optional key value pairs:
--------------------------
"data.BEEFens" : list
32 non-selfconsistent BEEF-vdW energies.
"""
# Connect to a database.
cmol = ase.db.connect(fname)
# Select data using search filters.
smol = cmol.select(selection)
# Connect to a database with frequencies.
if freq_path is not None:
c_freq = ase.db.connect(freq_path)
abinitio_energies = {}
freq_dict = {}
dbids = {}
ens_dict = {}
# Iterate over molecules.
for d in smol:
if 'energy' in d:
abinitio_energy = float(d.energy)
else:
abinitio_energy = float(d.epot)
species_name = str(d.formula)
# Attempt to retrieve the 32 BEEF perturbations.
try:
contribs = d.data.BEEFvdW_contribs
ens = self.bee.get_ensemble_perturbations(contribs)
except AttributeError:
ens = 0
# Store the most stable state of each molecule.
if species_name + '_gas' not in abinitio_energies:
abinitio_energies[species_name+'_gas'] = abinitio_energy
dbids[species_name + '_gas'] = int(d.id)
ens_dict[species_name + '_gas'] = ens
if freq_path is not None:
try:
d_freq = c_freq.get(['formula=' + species_name])
frequencies = d_freq.data.frequencies
freq_dict.update({species_name + '_gas': frequencies})
except KeyError:
continue
elif abinitio_energies[species_name+'_gas'] > abinitio_energy:
abinitio_energies[species_name+'_gas'] = abinitio_energy
dbids[species_name + '_gas'] = int(d.id)
ens_dict[species_name + '_gas'] = ens
return abinitio_energies, freq_dict, ens_dict, dbids
def _db2surf(self, fname, selection=[], freq_path=None,
site_specific=False):
""" Returns four dictionaries containing:
ab initio energies,
        frequencies,
non-selfconsistent BEEF perturbations,
database ids.
File dependencies
-----------------
fname : ase.db file.
Parameters
----------
fname : string
            path/filename.
selection : list
Optional ASE-db filter strings.
        site_specific : boolean
            If True: distinguish sites using the site key value pair, and
            store the potential energy of adsorbates on each site.
            Else: use the minimum ab initio energy, disregarding the site.
"""
# Connect to a database with clean surfaces and/or adsorbates.
csurf = ase.db.connect(fname)
# Connect to a database with frequencies.
if freq_path is not None:
c_freq = ase.db.connect(freq_path)
ssurf = csurf.select(selection)
abinitio_energies = {}
freq_dict = {}
dbids = {}
ens_dict = {}
# Loop over states.
for d in ssurf:
[n, species, name, phase, surf_lattice, facet,
cell] = self._get_adsorbate_fields(d)
# Skip any transition states.
if '-' in species:
continue
if 'ads' in d:
ads = str(d.ads)
else:
ads = species
if 'energy' in d:
abinitio_energy = float(d.energy)
else:
abinitio_energy = float(d.epot)
if species == '' or ('ads' in d and
(ads == 'slab' or ads == 'clean')):
species = ''
ads = 'slab'
site = 'slab'
n = 0
elif 'site' in d and site_specific:
site = str(d.site)
else:
site = 'site'
# Make key unique to a physical state of a site.
key = '_'.join([str(n), species, name, phase, surf_lattice,
facet, cell, site])
freq_key = key
# Attempt to import the 32 BEEF perturbations.
try:
contribs = d.data.BEEFvdW_contribs
ens = self.bee.get_ensemble_perturbations(contribs)
except AttributeError:
ens = 0
if key not in abinitio_energies:
abinitio_energies[key] = abinitio_energy
dbids[key] = int(d.id)
ens_dict[key] = ens
if species != '' and ads != 'slab' and freq_path is not None:
try:
freqsearch = ['species=' + species, 'name=' + name]
if site_specific is True:
freqsearch.append('site=' + site)
d_freq = c_freq.get(freqsearch)
frequencies = d_freq.data.frequencies
freq_dict.update({freq_key: frequencies})
except KeyError:
continue
elif abinitio_energies[key] > abinitio_energy:
abinitio_energies[key] = abinitio_energy
dbids[key] = int(d.id)
ens_dict[key] = ens
return abinitio_energies, freq_dict, ens_dict, dbids
def _get_adsorbate_fields(self, d):
"""Return a set of fields characterizing an adsorbate state.
Parameters
----------
d : dictionary
ASE database row.
Returns
----------
n : str
Number of adsorbates.
species : str
Species name. Must be chemical formula.
name : str
Name of catalyst.
phase : str
Crystal structure.
surf_lattice : str
Name of surface structure.
facet : str
Facet <hkl>
cell : str
Slab size <XxYxL>, where L denotes layers.
"""
if 'species' in d:
species = str(d.species)
else:
species = ''
name = str(d.name)
if 'supercell' in d:
cell = str(d.supercell)
else:
cell = 'XxY'
if 'layers' in d:
cell += 'x' + str(d.layers)
if 'crystal' in d:
phase = str(d.crystal)
elif 'phase' in d:
phase = str(d.phase)
elif 'spacegroup' in d:
phase = str(d.spacegroup)
else:
phase = ''
if 'surf_lattice' in d:
surf_lattice = str(d.surf_lattice)
else:
surf_lattice = ''
if 'facet' in d:
facet = str(d.facet)
else:
facet = 'facet'
if 'n' in d:
n = int(d.n)
elif species == '':
n = 0
else:
n = 1
return n, species, name, phase, surf_lattice, facet, cell
def _db2pes(self, fname, selection=[], site_specific=False):
"""Returns a dictionary containing potential energy surfaces and
meta data.
Dependencies
----------
fname : ase.db file.
Parameters
----------
fname : str
            path/filename.
selection : list of strings.
Optional ase.db selection strings.
        site_specific : boolean
            If True: distinguish sites using the site key value pair, and
            store the potential energy of adsorbates on each site.
            Else: use the minimum ab initio energy, disregarding the site.
"""
c = ase.db.connect(fname)
s = c.select(selection)
rxn_paths = {}
# Loop over states from ase .db
for d in s:
# Store variables identifying the atomic states.
species = str(d.species)
            # A '-' in the species name identifies transition states.
if '-' not in species:
continue
            # Most fields are optional.
if 'supercell' in d:
cell = str(d.supercell)
else:
cell = 'XxY'
if 'layers' in d:
cell += 'x' + str(d.layers)
if 'phase' in d:
phase = str(d.phase)
else:
phase = ''
if 'surf_lattice' in d:
surf_lattice = str(d.surf_lattice)
else:
surf_lattice = ''
            if 'facet' in d:
                facet = str(d.facet)
            else:
                facet = 'facet'
surf = '_'.join([str(d.name), phase, surf_lattice, facet, cell])
            # 'energy' only exists if a calculator is attached to the atoms.
if 'energy' in d:
abinitio_energy = float(d.energy)
else:
abinitio_energy = float(d.epot)
dbid = int(d.id)
# Try to import non-selfconsistent BEEF contributions.
try:
BEEFvdW_contribs = d.data.BEEFvdW_contribs
ens = self.bee.get_ensemble_perturbations(BEEFvdW_contribs)
except AttributeError:
ens = 0 # np.zeros(self.bee.size)
if 'path_id' in d:
rxn_id = str(d.path_id)
else:
rxn_id = uuid4().hex
if 'distance' in d:
distance = float(d.distance)
else:
distance = np.nan
if 'step' in d:
step = int(d.step)
else:
step = np.nan
if rxn_id in rxn_paths:
rxn_paths[rxn_id]['pes'].append(abinitio_energy)
rxn_paths[rxn_id]['dbids'].append(dbid)
rxn_paths[rxn_id]['ens'].append(ens)
rxn_paths[rxn_id]['distance'].append(distance)
# except AttributeError:
# d0 = c.get(rxn_paths[rxn_id]['dbids'][0])
# atoms0 = c.get_atoms(rxn_paths[rxn_id]['dbids'][0])
# species0 = str(d0.species)
# g1, g2 = species0.split('-')
# cons = atoms0.constraints
# fbl = cons[-1].todict()['kwargs']['pairs'][0]
# atom1 = int(fbl[0])
# atom2 = int(fbl[1])
# assert (atoms0[atom2].symbol == g1[0] and
# atoms0[atom1].symbol == g2[0]) or \
# (atoms0[atom1].symbol == g1[0] and
# atoms0[atom2].symbol == g2[0])
# atoms = c.get_atoms(dbid)
# dist = atoms.get_distance(atom1, atom2, mic=True)
# print(dbid, dist, atom1, atom2)
# c.update(dbid, distance=dist)
if 'image' in d:
rxn_paths[rxn_id]['images'].append(int(d.image))
else:
rxn_paths[rxn_id]['images'].append(int(d.step))
else:
if 'site' in d and site_specific:
site = str(d.site)
else:
site = 'site'
rxn_paths[rxn_id] = {'surface_name': surf,
'species': species,
'pes': [abinitio_energy],
'dbids': [dbid],
'ens': [ens],
'site': site,
'images': [step],
'distance': [distance]}
return rxn_paths
def _mol2ref(self, references):
""" Returns two dictionaries containing:
abinitio energy references for atoms
ensemble non-selfconsistent perturbations for atoms.
"""
atomic_e = {}
atomic_ens = {}
for t in references:
key = t[0]
species = t[1]
atomic_e[key] = self.epot[species]
if species in self.ens:
atomic_ens[key] = np.array(self.ens[species])
composition = string2symbols(species.split('_')[0])
unique_element, count = np.unique(composition, return_counts=True)
n = None
for i, symbol in enumerate(unique_element):
if symbol == key:
n = count[i]
else:
atomic_e[key] -= atomic_e[symbol] * float(count[i])
if key in atomic_ens and symbol in atomic_ens:
atomic_ens[key] = atomic_ens[key] - \
np.array(atomic_ens[symbol]) * float(count[i])
atomic_e[key] = atomic_e[key] / float(n)
if key in atomic_ens:
atomic_ens[key] = atomic_ens[key] / float(n)
return atomic_e, atomic_ens
def _get_refs(self):
""" Returns dictionaries with referece energies of slabs and
single atoms.
Parameters
----------
references : list of tuples of strings.
The first item in each tuple must be an atomic symbol, and the
second item in each tuple must be the self.epot dictionary key
of a reference gas phase species, <species name>_gas.
"""
ref_dict = self.atomic_e
ref_ens = self.atomic_ens
for key in self.epot.keys():
if 'slab' in key:
ref_dict[key] = self.epot[key]
if key in self.ens:
ref_ens[key] = self.ens[key]
return ref_dict, ref_ens
def _get_formation_energies(self):
""" Returns a dictionary with formation energies of adsorbates.
Dependencies
----------
        self : EnergyLandscape object
            self.epot : dictionary
                Each key is named in the format:
                n_species_name_phase_lattice_facet_supercell{x}layers_site,
                and contains the potential energy of a slab, an adsorbate or
                a molecule.
self.reference_epot : dictionary
Each key is either an atomic symbol and contains the reference
potential energy of that atom,
            or the key is named in the format
            0__name_phase_lattice_facet_cell_slab, and it contains the
            reference potential energy of the slab.
"""
formation_energies = {}
missing_slab = []
for key in list(self.epot):
E0 = 0
if 'gas' in key:
species, site_name = key.split('_') # Split key into name/site
else:
n, species, cat, pha, lattice, fac, cell, site = key.split('_')
site_name = '_'.join(['0_', cat, pha, lattice, fac, cell,
'slab'])
try:
E0 -= self.reference_epot[site_name]
except KeyError:
missing_slab.append(str(self.dbid[key]))
continue
if 'slab' not in key:
composition = string2symbols(species.replace('-', ''))
E0 += self.epot[key]
for atom in composition:
E0 -= self.reference_epot[atom]
formation_energies[key] = E0
if abs(E0) / len(composition) > 5.:
warnings.warn('Large formation energy: ' +
str(E0 / len(composition)) +
' eV per atom. ' + str(self.dbid[key]) +
': ' + key)
if len(missing_slab) > 0:
print('Missing slabs: ' + ','.join(missing_slab))
return formation_energies
def _get_BEEstd(self):
""" Returns a dictionary with BEEF ensembles and one with
BEEF standard deviations on formation energies.
"""
de_dict = {}
std_dict = {}
for key in self.ens:
de = np.zeros(self.bee.size)
if 'gas' in key:
species, site_name = key.split('_') # Split key into name/site
else:
n, species, cat, pha, lattice, fac, cell, site = key.split('_')
site_name = '_'.join(['0_', cat, pha, lattice, fac, cell,
'slab'])
try:
de -= self.reference_ens[site_name]
except KeyError:
continue
if 'slab' not in key:
composition = string2symbols(species.replace('-', ''))
de += self.ens[key]
for atom in composition:
de -= self.reference_ens[atom]
de_dict[key] = de
sigma = np.std(de)
std_dict[key] = sigma
if sigma / len(composition) > 0.5:
msg = "Large BEEF 1 sigma: " + \
str(sigma / len(composition)) + " eV per atom. " + \
str(self.dbid[key]) + ": " + key
warnings.warn(msg)
return de_dict, std_dict
def get_ellipses(self, ads_x, ads_y,
site_x=None, site_y=None):
""" Returns three dictionaries, width, height and angle with the
parameters for plotting the covariance ellipses showing the
+/- 1 sigma confidence intervals.
Parameters
----------
        ads_x : str or list
            adsorbate first dimension
        ads_y : str
            adsorbate second dimension
        site_x : str or list, optional
            site of adsorbate x
        site_y : str, optional
            site of adsorbate y
        Relies on self.de_dict, containing BEEF perturbations of adsorbates
        on slabs, and on self.reference_ens, containing perturbations of
        reference elements, e.g. 'C', 'H', and of slabs.
        """
widths = {}
heights = {}
angles = {}
# Loop over reference surfaces in ref_de.
for slab in self.reference_epot.keys():
# Ignore gas species.
if 'slab' not in slab:
continue
key_x = self._create_state(slab, ads_x, site_x)
if isinstance(key_x, list):
de_x = np.zeros(self.bee.size)
continue_outer = False
for k_x in key_x:
if k_x in self.de_dict:
de_x += self.de_dict[k_x]
else:
continue_outer = True
break
if continue_outer is True:
continue
elif key_x in self.de_dict:
de_x = self.de_dict[key_x]
else:
continue
key_y = self._create_state(slab, ads_y, site_y)
if key_y in self.de_dict:
de_y = self.de_dict[key_y]
else:
continue
if np.isclose(de_x, 0).all() or np.isclose(de_y, 0).all():
continue
width, height, angle = self.bee.get_ellipse(de_x, de_y)
widths[slab] = width
heights[slab] = height
angles[slab] = angle
self.width = widths
self.height = heights
self.angle = angles
return widths, heights, angles
def pes2ts(self, freq_path=None, rtol=1.1):
""" Returns dictionaries containing transition state energies.
Parameters
----------
freq_path : str
path/folder where frequency database is located.
rtol : float
            relative tolerance of the threshold distance, below which fixed
            bond length calculations are considered complete.
"""
if freq_path is not None:
c_freq = ase.db.connect(freq_path)
abinitio_energies = {}
freq_dict = {}
dbids = {}
ens_dict = {}
calculate = []
for rxn_id in self.rxn_paths:
warn = False
species = self.rxn_paths[rxn_id]['species']
m = self.rxn_paths[rxn_id]['surface_name']
site = self.rxn_paths[rxn_id]['site']
key = '1_' + species + '_' + m + '_' + site
images = self.rxn_paths[rxn_id]['images']
pes = np.array(self.rxn_paths[rxn_id]['pes'])
if len(images) > 1:
s = np.argsort(images)
# Look for local minima and maxima.
localmins = np.where(np.r_[True, pes[s][1:] < pes[s][:-1]] &
np.r_[pes[s][:-1] < pes[s][1:], True])[0]
localmaxs = np.where(np.r_[True, pes[s][1:] > pes[s][:-1]] &
np.r_[pes[s][:-1] > pes[s][1:], True])[0]
# Measure path roughness
differences = np.diff(pes[s])
roughness = np.std(differences)
# For fixed bond length (drag) calculations
g1, g2 = species.split('-')
dbond = covalent_radii[atomic_numbers[g1[0]]]
if len(g2) > 0:
dbond += covalent_radii[atomic_numbers[g2[0]]]
else:
# Assume the bond is with the surface.
try:
dbond += covalent_radii[atomic_numbers[
m.split('_')[0]]]
except KeyError:
print("Bond not defined.")
if len(np.unique(images)) != len(images):
warn = True
print('non unique image number!')
print('Warning!', species, m, roughness,
len(localmaxs), len(localmins), images)
continue
if (len(localmaxs) > 1 or
len(localmins) > 2 or
len(localmins) == 1):
warn = True
try:
shortest = np.nanmin(self.rxn_paths[rxn_id]['distance'])
if shortest > dbond * rtol:
warn = True
s_last = np.argmax(self.rxn_paths[rxn_id]['images'])
calculate.append(
self.rxn_paths[rxn_id]['dbids'][s_last])
continue
                except KeyError:
                    shortest = np.nan
                    print('Distances missing in reaction path.')
if warn:
warnings.warn("Warning! " + species + "*" + m + " " +
str(round(dbond * rtol, 3)) + " AA. " +
str(round(shortest, 3)) + " AA. " +
str(roughness) + " eV. " +
str(len(localmaxs)) + " local maxima. " +
str(len(localmins)) + " local minima. " +
str(len(images)) + " images.")
tst = np.argmax(pes)
if key not in abinitio_energies:
abinitio_energies[key] = pes[tst]
dbids[key] = self.rxn_paths[rxn_id]['dbids'][tst]
ens_dict[key] = self.rxn_paths[rxn_id]['ens'][tst]
if freq_path is not None:
try:
d_freq = c_freq.get('path_id=' + rxn_id)
frequencies = d_freq.data.frequencies
freq_dict.update({key: frequencies})
except KeyError:
continue
elif abinitio_energies[key] > pes[tst]:
abinitio_energies[key] = pes[tst]
dbids[key] = self.rxn_paths[rxn_id]['dbids'][tst]
ens_dict[key] = self.rxn_paths[rxn_id]['ens'][tst]
if len(calculate) > 0:
incomplete = ','.join([str(int(a)) for a in np.unique(calculate)])
print('Incomplete:', incomplete)
return abinitio_energies, freq_dict, ens_dict, dbids
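    # Worked illustration of the extrema masks in pes2ts (hypothetical
    # values, not from the original source): for a sorted profile
    # pes[s] = [0.0, 0.3, 0.6, 0.2], the local-minima mask evaluates to
    # [True, False, False, True] (the two end points) and the local-maxima
    # mask to [False, False, True, False] (the saddle point at index 2),
    # i.e. a clean path with a single barrier: two minima, one maximum.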
def scaling_analysis(self, x, y, lattice=None, site_x=None, site_y=None):
"""Returns the scaling relation information between the species
x and y on the surface geometry 'lattice' and site.
Parameters
----------
x : str or list
species x
y : str
species y
lattice : str or None
surface lattice of y
        site_x : str or None
            Site of x. If _db2surf was run with site_specific=False, use the
            default value, None or 'site'.
        site_y : str or None
            Site of y. If _db2surf was run with site_specific=False, use the
            default value, None or 'site'.
Returns
----------
slope : float
Scaling relation slope.
intercept : float
Scaling relation intercept.
X : list
List of formation energies.
Y : list
List of formation energies.
labels : list
List of keys referring to slabs.
"""
X = []
Y = []
labels = []
# Loop over slabs.
for slab in self.reference_epot.keys():
if 'slab' not in slab:
continue
# Filter by surface lattice.
if lattice is not None and lattice not in slab:
continue
# Get formation energy if it is stored.
key_y = self._create_state(slab, y, site_y)
if key_y not in self.formation_energies:
continue
key_x = self._create_state(slab, x, site_x)
# If the x species is a list, sum them.
if isinstance(key_x, list):
DeltaE_x = 0
continue_outer = False
for k_x in key_x:
if k_x in self.formation_energies:
DeltaE_x += self.formation_energies[k_x]
else:
continue_outer = True
break
if continue_outer:
continue
else:
X.append(DeltaE_x)
Y.append(self.formation_energies[key_y])
else:
if key_x in self.formation_energies:
X.append(self.formation_energies[key_x])
Y.append(self.formation_energies[key_y])
else:
continue
# Store the list of slabs.
labels.append(slab)
# Get the scaling relation.
slope, intercept = np.polyfit(X, Y, deg=1)
return slope, intercept, X, Y, labels
def _create_state(self, slab, species, site=None):
"""Return a key for a hypothetical adsorbate state. This is useful
for reading or filling in formation energies of states that are related
by scaling relations.
Parameters
----------
slab : str
key of the slab
species : str
adsorbate chemical formula
site : str
optional site.
"""
if isinstance(species, list):
key = []
for x in range(len(species)):
fields = slab.split('_')
fields[0] = '1'
fields[1] = species[x]
if site is None:
fields[-1] = 'site'
else:
fields[-1] = site[x]
key.append('_'.join(fields))
else:
fields = slab.split('_')
fields[0] = '1'
fields[1] = species
if site is None:
fields[-1] = 'site'
else:
fields[-1] = site
key = '_'.join(fields)
return key
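    # Example (hypothetical values): given the slab key
    # '0__Pt_fcc_hexagonal_(111)_2x2x4_slab', _create_state(slab, 'O')
    # returns '1_O_Pt_fcc_hexagonal_(111)_2x2x4_site', i.e. the slab key
    # with the adsorbate count, species and (optionally) site substituted.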
def insert_interpolated_states(self, x, y, lattice=None, site_y=None,
slope=None, intercept=None):
""" Update the formation_energy dictionary with interpolated values.
This is intended for use with thermodynamic scalers only.
Parameters
----------
x : list of strings
keys from self.formation_energy
y : str
species
site_y : str
site of species y
slope : float or None
intercept : float or None
"""
if slope is None or intercept is None:
raise NotImplementedError("Call scaling_analysis.")
[slope,
intercept,
X,
Y,
labels] = self.scaling_analysis(x, y, lattice=lattice,
site_x=None, site_y=None)
for key_x in x:
X = self.formation_energies[key_x]
Y = slope * X + intercept
fields = key_x.split('_')
fields[1] = y
if site_y is not None:
fields[-1] = site_y
key_y = '_'.join(fields)
self.formation_energies.update({key_y: Y})
def _insert_state(self, key, descriptor, slopes, intercept):
""" Update the formation_energy dictionary with interpolated values.
This is intended for use with thermodynamic scalers only.
Parameters
----------
key : str
            exact key of the species to be inserted in self.formation_energies
        descriptor : str or list
            species names of descriptors.
        slopes : list
            slopes of the scaling relations, one per descriptor.
        intercept : float
            intercept of the scaling relation.
"""
interpolated = intercept
fields = key.split('_')
for a in range(len(descriptor)):
fields[1] = descriptor[a]
fields[-1] = 'site'
energy_key = '_'.join(fields)
interpolated += slopes[a] * self.formation_energies[energy_key]
state = {key: interpolated}
self.formation_energies.update(state)
def _insert_frequencies(self, key, frequencies):
""" Update the formation_energy dictionary with interpolated values.
This is intended for use with thermodynamic scalers only.
Parameters
----------
key : str
exact key of the species to be inserted self.freq
frequencies : list
list of frequencies.
slope : list
"""
state = {key: frequencies}
self.freq.update(state)
def insert_rscaled_states(self, x, y, site_y=None,
slope=None, intercept=None):
raise NotImplementedError("Coming in 2018.")
def make_ensemble_input_files(self, prefix, suffix, site_specific=False):
""" Save catmap input files for ensemble models.
It is advisable to use a smaller than default beef_size.
"""
        if self.bee.size >= 2000:
            warnings.warn("It is advisable to use a smaller than default " +
                          "beef_size for BEEF ensemble propagation through " +
                          "the micro-kinetic model.")
raise NotImplementedError("Coming in 2018.")
# Create a header.
# headerlist = ['surface_name', 'phase', 'site_name',
# 'species_name', 'formation_energy',
# 'frequencies', 'reference', 'coverage', 'std']
# header = '\t'.join(headerlist)
def db_attach_formation_energy(self, fname, key_name, overwrite=True):
""" Update a database file to append formation energies.
Parameters
----------
fname : str
path and filename of ase database.
"""
c = ase.db.connect(fname)
for key in tqdm(list(self.formation_energies)):
if 'gas' not in str(key):
if overwrite:
kvp = {key_name: float(self.formation_energies[key])}
c.update(int(self.dbid[key]), **kvp)
elif key_name not in c.get(int(self.dbid[key])):
kvp = {key_name: float(self.formation_energies[key])}
c.update(int(self.dbid[key]), **kvp)
def make_input_file(self, file_name, site_specific='facet',
catalyst_specific=False, covariance=None,
reference=None):
""" Saves the catmap input file.
Parameters
----------
file_name : string
path and name of the output file.
site_specific : boolean or string
Decides what to export to the site key.
True exports the site field from the db.
False exports the lattice field from the db.
'facet' exports the facet field from the db.
str : another string is treated as False, except if that string is
found in the site field.
covariance : tuple
            Must contain two strings, which are species names, between which
            BEEF covariance ellipses will be stored in the
            width, height and angle columns of the catmap data file.
"""
# Create a header.
headerlist = ['surface_name', 'phase', 'site_name',
'species_name', 'formation_energy',
'frequencies', 'reference', 'coverage', 'std']
if covariance is not None:
headerlist += ['width', 'height', 'angle', 'covariance']
header = '\t'.join(headerlist)
# List of lines in the output.
lines = []
for key in self.formation_energies.keys(): # Iterate through keys
if key in self.dbid and reference is None:
ref = self.dbid[key]
else:
ref = reference
E = round(self.formation_energies[key], 4)
if 'gas' in key:
name, site = key.split('_') # Split key into name/site
try:
frequency = self.freq[key]
except KeyError:
frequency = []
try:
std = round(self.std[key], 4)
except KeyError:
std = np.NaN
else:
n, name, cat, pha, lattice, facet, cell, site = key.split('_')
if catalyst_specific and not catalyst_specific == cat:
continue
if 'slab' not in key: # Do not include empty site energy (0)
try:
freq_key = key
frequency = self.freq[freq_key]
except KeyError:
frequency = []
try:
std = round(self.std[key], 4)
except KeyError:
std = np.NaN
if site == 'gas':
surface = None
phase = ''
lattice = ''
site_name = 'gas'
coverage = 0.
else:
surface = cat
phase = pha
try:
coverage = round(float(n) /
(float(cell[0]) * float(cell[2])), 3)
except ValueError:
coverage = 0.
if site_specific is True:
site_name = lattice + '_' + site
elif site_specific is False:
site_name = lattice
elif site_specific == 'facet':
site_name = facet
else:
if site == site_specific:
site_name = site
else:
site_name = lattice
E = float(E)
outline = [surface, phase, site_name, name,
E, list(frequency), ref, coverage, std]
if covariance is not None:
if covariance[1] == name:
slab = '_'.join(['0_', cat, pha, lattice, facet, cell,
'slab'])
if slab in self.width:
width = round(self.width[slab], 4)
height = round(self.height[slab], 4)
angle = round(self.angle[slab], 4)
ellipse_ref = covariance[0]
else:
width = ''
height = ''
angle = ''
ellipse_ref = ''
else:
width = ''
height = ''
angle = ''
ellipse_ref = ''
outline += [width, height, angle, ellipse_ref]
line = '\t'.join([str(w) for w in outline])
lines.append(line)
# The file is easier to read if sorted (optional).
lines.sort()
# Add header to top.
lines = [header] + lines
# Join the lines with a line break.
input_file = '\n'.join(lines)
        # Write the text to the file; the context manager closes it.
        with open(file_name, 'w') as outfile:
            outfile.write(input_file)
print("Formation energies exported to " + file_name)
def make_nested_folders(self, project, reactions, surfaces=None,
site='site', mol_db=None,
slab_db=None, ads_db=None, ts_db=None,
publication='', url='', xc='xc', code='code'):
"""Saves a nested directory structure.
The folder structure for catalysis-hub.org should be
<project>
<code>
<xc>
<catalyst>
<facet>
<reaction>@<site>
<species>.traj or
<species>_<slab>.traj or
'TS'.traj
<catalyst>_<phase>_<bulk>
<gas>
<species>.traj>
Parameters
----------
project : str
parent folder name.
reactions : list
catmap's rxn_expressions. A list of strings.
surfaces : list
List of catalyst names.
site : str
Site name.
mol_db : str
Path and filename of database containing gas species
slab_db : str
Path and filename of database containing slabs
ads_db : str
Path and filename of database containing adsorbate/slab structures.
ts_db : str
Path and filename of database containing reaction paths.
publication : str
Author or publication reference.
url : str
url to publication.
"""
data_folder = project + '/' + code + '/' + xc
# Create a header
# spreadsheet = [['chemical_composition', 'facet', 'reactants',
# 'products', 'reaction_energy',
# 'beef_standard_deviation',
# 'activation_energy', 'DFT_code', 'DFT_functional',
# 'reference', 'url']]
# Connect to databases.
if surfaces is None:
surfaces = [s for s in self.reference_epot.keys() if 'slab' in s]
if mol_db is None:
mol_db = self.mol_db
if ads_db is None:
ads_db = self.ads_db
if slab_db is None:
slab_db = ads_db
if ts_db is None:
ts_db = self.ts_db
c_mol = ase.db.connect(mol_db)
c_ads = ase.db.connect(ads_db)
if slab_db == ads_db:
c_slab = c_ads
else:
c_slab = ase.db.connect(slab_db)
if ts_db is not None:
c_ts = ase.db.connect(ts_db)
# Iterate over surfaces
Nsurf = 0
Nrxn = 0
for slabkey in surfaces:
[n, species, name, phase,
lattice, facet, cell, slab] = slabkey.split('_')
catalyst_name = name.replace('/', '-') + '_' + phase
path_surface = data_folder + '/' + catalyst_name
facet_name = facet.replace('(', '').replace(')', '') + '_' + cell
path_facet = path_surface + '/' + facet_name
# Loop over reaction expressions.
for i, rxn in enumerate(reactions):
# Separate left and right side of reaction, and ts.
states = rxn.replace(' ', '').split('<->')
if len(states) == 1:
states = states[0].split('->')
if len(states) == 1:
states = states[0].split('<-')
elif len(states) < 3:
states = [states[0]] + states[-1].split('->')
if len(states) < 3:
states = states[0].split('<-') + states[1:]
# List individual species.
rname, reactants = self._state2species(states[0])
pname, products = self._state2species(states[-1])
reaction_name = '__'.join([rname, pname])
path_reaction = path_facet + '/' + reaction_name
DeltaE = 0.
de = np.zeros(self.bee.size)
ea = 0.
intermediates_exist = True
totraj = {}
# Find reactant structures and energies
for reactant in reactants:
species, sitesymbol = reactant.split('_')
n, species = self._coefficient_species(species)
if sitesymbol == 'g':
rkey = species + '_gas'
fname = data_folder + '/gas/' + rkey
elif species == '*':
rkey = slabkey
fname = path_facet + '/empty_slab'
else:
rkey = '_'.join(['1', species.replace('*', ''), name,
phase, lattice, facet, cell, site])
fname = path_reaction + '/' + species
if rkey not in self.dbid:
intermediates_exist = False
break
totraj.update({rkey:
{'dbid': self.dbid[rkey],
'fname': fname}})
if species != '*':
DeltaE -= n * self.formation_energies[rkey]
de -= n * self.de_dict[rkey]
if not intermediates_exist:
continue
# Find transition state structures and energies.
if ts_db is not None:
tstates = states[1].split('+')
for ts in tstates:
if '-' not in ts:
continue
species, sitesymbol = ts.split('_')
tskey = '_'.join(['1', species, name, phase, lattice,
facet, cell, site])
if tskey not in self.dbid:
continue
totraj.update({tskey:
{'dbid': self.dbid[tskey],
'fname': path_reaction + '/TS'}})
ea += self.formation_energies[tskey] - DeltaE
# Find product structures and energies.
for product in products:
                    species, sitesymbol = product.split('_')
                    n, species = self._coefficient_species(species)
if sitesymbol == 'g':
pkey = species + '_gas'
fname = data_folder + '/gas/' + pkey
elif species == '*':
pkey = slabkey
fname = path_facet + '/empty_slab'
else:
pkey = '_'.join(['1', species.replace('*', ''), name,
phase, lattice, facet, cell, site])
fname = path_reaction + '/' + species
if pkey not in self.dbid:
intermediates_exist = False
break
totraj.update({pkey:
{'dbid': self.dbid[pkey],
'fname': fname}})
if species != '*':
DeltaE += n * self.formation_energies[pkey]
de += n * self.de_dict[pkey]
# If all states are found for this surface, write.
if intermediates_exist:
# Loop over states to export.
for trajkey in totraj.keys():
fname = totraj[trajkey]['fname'] + '.traj'
# Load the atomic structure from appropriate db.
if 'gas' in trajkey:
atoms = c_mol.get_atoms(self.dbid[trajkey])
d = c_mol.get(self.dbid[trajkey])
elif '-' in trajkey.split('_')[1]:
atoms = c_ts.get_atoms(self.dbid[trajkey])
d = c_ts.get(self.dbid[trajkey])
elif 'slab' in trajkey.split('_')[-1]:
atoms = c_slab.get_atoms(self.dbid[trajkey])
d = c_slab.get(self.dbid[trajkey])
else:
atoms = c_ads.get_atoms(self.dbid[trajkey])
d = c_ads.get(self.dbid[trajkey])
if atoms.calc is None:
# Require a calculator.
calc = SinglePointDFTCalculator(atoms)
calc.results['energy'] = float(d.epot)
atoms.set_calculator(calc)
if 'data' not in atoms.info:
atoms.info['data'] = {}
if trajkey in self.freq:
# Attach vibrational frequencies.
atoms.info['data'].update(
{'frequencies': self.freq[trajkey]})
# Save trajectory file.
folder_structure = fname.split('/')
for depth in range(1, len(folder_structure)-1):
directory = '/'.join(folder_structure[:depth+1])
if not os.path.isdir(directory):
os.mkdir(directory)
atoms.write(fname)
# Store rows for spreadsheet.
# std = np.std(de)
# spreadsheet.append([name, facet, rname, pname,
# DeltaE, std, ea,
# code, xc,
# publication, url])
# width, height, angle, covariance
Nrxn += 1
else:
continue
Nsurf += 1
# with open(project + '/data.csv', 'wb') as f:
# writer = csv.writer(f)
# writer.writerows(spreadsheet)
print(Nrxn, 'reactions imported.')
print(Nsurf, 'surfaces saved.')
def _state2species(self, state):
"""Parse one side of a CatMAP rxn expression, i.e. a chemical state.
Parameters
----------
state : str
Left or right side of CatMAP rxn expression, excluding arrows.
Returns
----------
state_name : str
Name of chemical state formatted for folder naming.
slist : list
List of species. <coefficient><structure formula or Hill formula>.
"""
slist = state.split('+')
species = []
for specie in slist:
if '_g' in specie:
# Gas species.
species.append(specie.split('_')[0] + 'gas')
elif '*_' in specie:
# Empty sites.
species.append(specie.split('_')[0].replace('*', 'star'))
else:
# Adsorbates.
species.append(specie.split('_')[0] + 'star')
return '_'.join(species), slist
def _coefficient_species(self, species):
"""Return the stochiometric coefficient and the species type.
Parameters
----------
species : str
<coefficient><structure formula or Hill formula>.
Returns
----------
n : int
Stoichiometric coefficient.
species : str
Species name.
"""
i = 0
n = 1
if species[0] == '-':
# Negative coefficient allowed.
i += 1
n = -1
while species[i].isdigit():
i += 1
if i > 1:
n = int(species[:i])
elif i == 1 and n != -1:
n = int(species[0])
return n, species[i:]
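# Illustrative sketch (not part of the original module): the parsing rules
# described in the docstring above, reproduced as a standalone function so
# they can be experimented with outside the class.
def _demo_coefficient_species(species):
    """Split '<coefficient><formula>' strings, e.g. '2CO_g' -> (2, 'CO_g')."""
    i, n = 0, 1
    if species[0] == '-':
        # Negative coefficients are allowed.
        i, n = 1, -1
    while species[i].isdigit():
        i += 1
    if i > 1:
        n = int(species[:i])
    elif i == 1 and n != -1:
        n = int(species[0])
    return n, species[i:]

assert _demo_coefficient_species('2CO_g') == (2, 'CO_g')
assert _demo_coefficient_species('-H2O_g') == (-1, 'H2O_g')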
|
mieand/catmap
|
catmap/api/ase_data.py
|
Python
|
gpl-3.0
| 59,604
|
[
"ASE",
"CRYSTAL"
] |
9f56c42c28f067e5ecbd94213ceb3b2c55e4c2217f046a272db9c12a56a0c8fa
|
#!/usr/bin/env python
""" demo run for ase_qmmm_manyqm calculator """
# ./test_ase_qmmm_manyqm.py gromacs_mm-relax.g96
from ase.calculators.gromacs import Gromacs
from ase.calculators.aims import Aims
from ase.calculators.ase_qmmm_manyqm import AseQmmmManyqm
from ase.optimize import BFGS
import sys
from ase.io.gromos import read_gromos
RUN_COMMAND = '/home/mka/bin/aims.071711_6.serial.x'
SPECIES_DIR = '/home/mka/Programs/fhi-aims.071711_6/species_defaults/light/'
LOG_FILE = open("ase-qm-mm-output.log","w")
sys.stdout = LOG_FILE
infile_name = sys.argv[1]
CALC_QM1 = Aims(charge = 0,
xc = 'pbe',
sc_accuracy_etot = 1e-5,
sc_accuracy_eev = 1e-2,
sc_accuracy_rho = 1e-5,
sc_accuracy_forces = 1e-3,
species_dir = SPECIES_DIR,
run_command = RUN_COMMAND)
CALC_QM1.set(output = 'hirshfeld')
CALC_QM2 = Aims(charge = 0,
xc = 'pbe',
sc_accuracy_etot = 1e-5,
sc_accuracy_eev = 1e-2,
sc_accuracy_rho = 1e-5,
sc_accuracy_forces = 1e-3,
species_dir = SPECIES_DIR,
run_command = RUN_COMMAND)
CALC_QM2.set(output = 'hirshfeld')
CALC_QM3 = Aims(charge = 0,
xc = 'pbe',
sc_accuracy_etot = 1e-5,
sc_accuracy_eev = 1e-2,
sc_accuracy_rho = 1e-5,
sc_accuracy_forces = 1e-3,
species_dir = SPECIES_DIR,
run_command = RUN_COMMAND)
CALC_QM3.set(output = 'hirshfeld')
CALC_MM = Gromacs(
init_structure_file = infile_name,
structure_file = 'gromacs_qm.g96',
force_field = 'oplsaa',
water_model = 'tip3p',
base_filename = 'gromacs_qm',
doing_qmmm = True, freeze_qm = False,
index_filename = 'index.ndx',
define = '-DFLEXIBLE',
integrator = 'md',
nsteps = '0',
nstfout = '1',
nstlog = '1',
nstenergy = '1',
nstlist = '1',
ns_type = 'grid',
pbc = 'xyz',
rlist = '1.15',
coulombtype = 'PME-Switch',
rcoulomb = '0.8',
vdwtype = 'shift',
rvdw = '0.8',
rvdw_switch = '0.75',
DispCorr = 'Ener')
CALC_MM.generate_topology_and_g96file()
CALC_MM.generate_gromacs_run_file()
CALC_QMMM = AseQmmmManyqm(nqm_regions = 3,
qm_calculators = [CALC_QM1, CALC_QM2, CALC_QM3],
mm_calculator = CALC_MM,
link_info = 'byQM')
# link_info = 'byFILE')
SYSTEM = read_gromos('gromacs_qm.g96')
SYSTEM.set_calculator(CALC_QMMM)
DYN = BFGS(SYSTEM)
DYN.run(fmax = 0.05)
print('exiting fine')
LOG_FILE.close()
|
conwayje/ase-python
|
doc/ase/calculators/test_ase_qmmm_manyqm.py
|
Python
|
gpl-2.0
| 2,703
|
[
"ASE",
"FHI-aims",
"GROMOS",
"Gromacs"
] |
30d16cada639b82d65689055670724a5b83cfd6d6b485276e2ed6ef9bd1e43e7
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.backends.pyside.imports import *
from agui.aextras import AMessage
class Message(AMessage):
def message(self, window_title, title, message, icon, parent=None):
message2 = "<b>%s</b><br/><br/>%s" % (title, message)
self.message_alt(window_title, message2, icon, parent)
def message_alt(self, window_title, message, icon, parent=None):
self.dialog = QtGui.QMessageBox(QtGui.QMessageBox.NoIcon, window_title, message, QtGui.QMessageBox.Close, parent)
self.dialog.setIconPixmap(icon.icon().pixmap(32, 32))
self.dialog.show()
def yes_no(self, window_title, message, icon=None, parent=None):
ans = QtGui.QMessageBox.question(parent, window_title, message, QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes)
value = self.no
if ans == QtGui.QMessageBox.Yes:
value = self.yes
return value
|
bhdouglass/agui
|
agui/backends/pyside/extras/message.py
|
Python
|
gpl-3.0
| 1,667
|
[
"Brian"
] |
095e7f02f037a4adb47b866a8841448485fe6492a5212738de3635a00f230b9c
|
# Datasets.
import json
import click
import mapbox
from mapboxcli.errors import MapboxCLIException
@click.group(short_help="Read and write Mapbox datasets (has subcommands)")
@click.pass_context
def datasets(ctx):
"""Read and write GeoJSON from Mapbox-hosted datasets
All endpoints require authentication. An access token with
appropriate dataset scopes is required, see `mapbox --help`.
Note that this API is currently a limited-access beta.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Datasets(access_token=access_token)
ctx.obj['service'] = service
@datasets.command(short_help="List datasets")
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def list(ctx, output):
"""List datasets.
Prints a list of objects describing datasets.
$ mapbox datasets list
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list()
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(short_help="Create an empty dataset")
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def create(ctx, name, description):
"""Create a new dataset.
Prints a JSON object containing the attributes
of the new dataset.
$ mapbox datasets create
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.create(name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-dataset",
short_help="Return information about a dataset")
@click.argument('dataset', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_dataset(ctx, dataset, output):
"""Read the attributes of a dataset.
Prints a JSON object containing the attributes
of a dataset. The attributes: owner (a Mapbox account),
id (dataset id), created (Unix timestamp), modified
(timestamp), name (string), and description (string).
$ mapbox datasets read-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_dataset(dataset)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="update-dataset",
short_help="Update information about a dataset")
@click.argument('dataset', required=True)
@click.option('--name', '-n', default=None, help="Name for the dataset")
@click.option('--description', '-d', default=None,
help="Description for the dataset")
@click.pass_context
def update_dataset(ctx, dataset, name, description):
"""Update the name and description of a dataset.
Prints a JSON object containing the updated dataset
attributes.
$ mapbox datasets update-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.update_dataset(dataset, name, description)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-dataset", short_help="Delete a dataset")
@click.argument('dataset', required=True)
@click.pass_context
def delete_dataset(ctx, dataset):
"""Delete a dataset.
$ mapbox datasets delete-dataset dataset-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_dataset(dataset)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="list-features",
short_help="List features in a dataset")
@click.argument('dataset', required=True)
@click.option('--reverse', '-r', default=False,
help="Read features in reverse")
@click.option('--start', '-s', default=None,
help="Feature id to begin reading from")
@click.option('--limit', '-l', default=None,
help="Maximum number of features to return")
@click.option('--output', '-o', default='-',
help="Save output to a file")
@click.pass_context
def list_features(ctx, dataset, reverse, start, limit, output):
"""Get features of a dataset.
Prints the features of the dataset as a GeoJSON feature collection.
$ mapbox datasets list-features dataset-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.list_features(dataset, reverse, start, limit)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="read-feature",
short_help="Read a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.option('--output', '-o', default='-', help="Save output to a file")
@click.pass_context
def read_feature(ctx, dataset, fid, output):
"""Read a dataset feature.
Prints a GeoJSON representation of the feature.
$ mapbox datasets read-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:read` scope is required, see `mapbox --help`.
"""
stdout = click.open_file(output, 'w')
service = ctx.obj.get('service')
res = service.read_feature(dataset, fid)
if res.status_code == 200:
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="put-feature",
short_help="Insert or update a single feature in a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.argument('feature', required=False, default=None)
@click.option('--input', '-i', default='-',
help="File containing a feature to put")
@click.pass_context
def put_feature(ctx, dataset, fid, feature, input):
"""Create or update a dataset feature.
The semantics of HTTP PUT apply: if the dataset has no feature
with the given `fid` a new feature will be created. Returns a
GeoJSON representation of the new or updated feature.
$ mapbox datasets put-feature dataset-id feature-id 'geojson-feature'
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
if feature is None:
stdin = click.open_file(input, 'r')
feature = stdin.read()
feature = json.loads(feature)
service = ctx.obj.get('service')
res = service.update_feature(dataset, fid, feature)
if res.status_code == 200:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="delete-feature",
short_help="Delete a single feature from a dataset")
@click.argument('dataset', required=True)
@click.argument('fid', required=True)
@click.pass_context
def delete_feature(ctx, dataset, fid):
"""Delete a feature.
$ mapbox datasets delete-feature dataset-id feature-id
All endpoints require authentication. An access token with
`datasets:write` scope is required, see `mapbox --help`.
"""
service = ctx.obj.get('service')
res = service.delete_feature(dataset, fid)
if res.status_code != 204:
raise MapboxCLIException(res.text.strip())
@datasets.command(name="create-tileset",
short_help="Generate a tileset from a dataset")
@click.argument('dataset', required=True)
@click.argument('tileset', required=True)
@click.option('--name', '-n', default=None, help="Name for the tileset")
@click.pass_context
def create_tileset(ctx, dataset, tileset, name):
"""Create a vector tileset from a dataset.
$ mapbox datasets create-tileset dataset-id username.data
Note that the tileset must start with your username and the dataset
must be one that you own. To view processing status, visit
https://www.mapbox.com/data/. You may not generate another tileset
from the same dataset until the first processing job has completed.
All endpoints require authentication. An access token with
`uploads:write` scope is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
service = mapbox.Uploader(access_token=access_token)
uri = "mapbox://datasets/{username}/{dataset}".format(
username=tileset.split('.')[0], dataset=dataset)
res = service.create(uri, tileset, name)
if res.status_code == 201:
click.echo(res.text)
else:
raise MapboxCLIException(res.text.strip())
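# Illustrative sketch (not part of the original module): the command group
# above can be exercised without credentials via click's test runner, since
# ``--help`` is handled eagerly and never reaches the Mapbox service.
if __name__ == '__main__':
    from click.testing import CliRunner
    result = CliRunner().invoke(datasets, ['--help'])
    print(result.output)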
|
mapbox/mapbox-cli-py
|
mapboxcli/scripts/datasets.py
|
Python
|
mit
| 9,701
|
[
"VisIt"
] |
ce448333e6ecad7eca41080e1b8a85ad2176a53c7576b26b1866600974093510
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used units for energy, length,
temperature, time and charge. FloatWithUnit also support conversion to one
another, and additions and subtractions perform automatic conversion if
units are detected. An ArrayWithUnit is also implemented, which is a subclass
of numpy's ndarray with similar unit features.
"""
import collections
import numbers
from functools import partial
import numpy as np
import scipy.constants as const
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
"""
Some conversion factors
"""
Ha_to_eV = 1 / const.physical_constants["electron volt-hartree relationship"][0]
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = const.physical_constants["atomic mass unit-kilogram relationship"][0]
mile_to_meters = const.mile
bohr_to_angstrom = const.physical_constants["Bohr radius"][0] * 1e10
bohr_to_ang = bohr_to_angstrom
ang_to_bohr = 1 / bohr_to_ang
kCal_to_kJ = const.calorie
kb = const.physical_constants["Boltzmann constant in eV/K"][0]
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
"d": 3600 * 24,
},
"current": {"A": 1},
"temperature": {
"K": 1,
},
"amount": {"mol": 1, "atom": 1 / const.N_A},
"intensity": {"cd": 1},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024 ** 2,
"Gb": 1024 ** 3,
"Tb": 1024 ** 4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v for k, v in BASE_UNITS["memory"].items()})
# The current list of supported derived units, defined in terms of powers of
# SI base units and constants.
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, const.e: 1},
"meV": {"kg": 1, "m": 2, "s": -2, const.e * 1e-3: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, const.e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, const.e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1},
"kCal": {"kg": 1, "m": 2, "s": -2, 1000: 1, kCal_to_kJ: 1},
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, const.e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2},
"KN": {"kg": 1, "m": 1, "s": -2, 1000: 1},
"MN": {"kg": 1, "m": 1, "s": -2, 1e6: 1},
"GN": {"kg": 1, "m": 1, "s": -2, 1e9: 1},
},
"frequency": {
"Hz": {"s": -1},
"KHz": {"s": -1, 1000: 1},
"MHz": {"s": -1, 1e6: 1},
"GHz": {"s": -1, 1e9: 1},
"THz": {"s": -1, 1e12: 1},
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1},
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3},
"KW": {"m": 2, "kg": 1, "s": -3, 1000: 1},
"MW": {"m": 2, "kg": 1, "s": -3, 1e6: 1},
"GW": {"m": 2, "kg": 1, "s": -3, 1e9: 1},
},
"emf": {"V": {"m": 2, "kg": 1, "s": -3, "A": -1}},
"capacitance": {"F": {"m": -2, "kg": -1, "s": 4, "A": 2}},
"resistance": {"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}},
"conductance": {"S": {"m": -2, "kg": -1, "s": 3, "A": 2}},
"magnetic_flux": {"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}},
"cross_section": {"barn": {"m": 2, 1e-28: 1}, "mbarn": {"m": 2, 1e-31: 1}},
}
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items())) # type: ignore
SUPPORTED_UNIT_NAMES = tuple(i for d in ALL_UNITS.values() for i in d.keys())
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {} # type: ignore
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
unit_type = _UNAME2UTYPE[unit]
si_unit = filter(lambda k: BASE_UNITS[unit_type][k] == 1, BASE_UNITS[unit_type].keys())
return list(si_unit)[0], BASE_UNITS[unit_type][unit]
class UnitError(BaseException):
"""
Exception class for unit errors.
"""
def _check_mappings(u):
for v in DERIVED_UNITS.values():
for k2, v2 in v.items():
if all(v2.get(ku, 0) == vu for ku, vu in u.items()) and all(
u.get(kv2, 0) == vv2 for kv2, vv2 in v2.items()
):
return {k2: 1}
return u
class Unit(collections.abc.Mapping):
"""
Represents a unit, e.g., "m" for meters, etc. Supports compound units.
Only integer powers are supported for units.
"""
Error = UnitError
def __init__(self, unit_def):
"""
Constructs a unit.
Args:
unit_def: A definition for the unit. Either a mapping of unit to
powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
or simply as a string "kg m^2 s^-1". Note that the supported
format uses "^" as the power operator and all units must be
space-separated.
"""
if isinstance(unit_def, str):
unit = collections.defaultdict(int)
import re
for m in re.finditer(r"([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
p = m.group(2)
p = 1 if not p else int(p)
k = m.group(1)
unit[k] += p
else:
unit = {k: v for k, v in dict(unit_def).items() if v != 0}
self._unit = _check_mappings(unit)
def __mul__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] += v
return Unit(new_units)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] -= v
return Unit(new_units)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, i):
return Unit({k: v * i for k, v in self.items()})
def __iter__(self):
return self._unit.__iter__()
def __getitem__(self, i):
return self._unit[i]
def __len__(self):
return len(self._unit)
def __repr__(self):
sorted_keys = sorted(self._unit.keys(), key=lambda k: (-self._unit[k], k))
return " ".join(
["{}^{}".format(k, self._unit[k]) if self._unit[k] != 1 else k for k in sorted_keys if self._unit[k] != 0]
)
def __str__(self):
return self.__repr__()
@property
def as_base_units(self):
"""
Converts all units to base SI units, including derived units.
Returns:
(base_units_dict, scaling factor). base_units_dict will not
contain any constants, which are gathered in the scaling factor.
"""
b = collections.defaultdict(int)
factor = 1
for k, v in self.items():
derived = False
for d in DERIVED_UNITS.values():
if k in d:
for k2, v2 in d[k].items():
if isinstance(k2, numbers.Number):
factor *= k2 ** (v2 * v)
else:
b[k2] += v2 * v
derived = True
break
if not derived:
si, f = _get_si_unit(k)
b[si] += v
factor *= f ** v
return {k: v for k, v in b.items() if v != 0}, factor
def get_conversion_factor(self, new_unit):
"""
Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit.
"""
uo_base, ofactor = self.as_base_units
un_base, nfactor = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(), key=lambda d: _UNAME2UTYPE[d[0]])
units_old = sorted(uo_base.items(), key=lambda d: _UNAME2UTYPE[d[0]])
factor = ofactor / nfactor
for uo, un in zip(units_old, units_new):
if uo[1] != un[1]:
raise UnitError("Units %s and %s are not compatible!" % (uo, un))
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
return factor
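# Illustrative sketch (not part of the original module): a small, unused
# helper demonstrating the compound-unit algebra and conversion factors
# implemented by ``Unit`` above.
def _demo_unit_algebra():
    velocity = Unit("m s^-1")         # parsed from a string definition
    momentum = velocity * Unit("kg")  # compound multiplication
    assert str(momentum) == "kg m s^-1"
    # One metre expressed in kilometres:
    assert abs(Unit("m").get_conversion_factor("km") - 1e-3) < 1e-12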
class FloatWithUnit(float):
"""
Subclasses float to attach a unit type. Typically, you should use the
pre-defined unit type subclasses such as Energy, Length, etc. instead of
using FloatWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity). Note that FloatWithUnit does not override the eq
method for float, i.e., units are not checked when testing for equality.
The reason is to allow this class to be used transparently wherever floats
are expected.
>>> e = Energy(1.1, "Ha")
>>> a = Energy(1.1, "Ha")
>>> b = Energy(3, "eV")
>>> c = a + b
>>> print(c)
1.2102479761938871 Ha
>>> c.to("eV")
32.932522246000005 eV
"""
Error = UnitError
@classmethod
def from_string(cls, s):
"""
Initialize a FloatWithUnit from a string. Example: Memory.from_string("1. Mb")
"""
# Extract num and unit string.
s = s.strip()
for i, char in enumerate(s):
if char.isalpha() or char.isspace():
break
else:
raise Exception("Unit is missing in string %s" % s)
num, unit = float(s[:i]), s[i:]
# Find unit type (set it to None if it cannot be detected)
for unit_type, d in BASE_UNITS.items():
if unit in d:
break
else:
unit_type = None
return cls(num, unit, unit_type=unit_type)
def __new__(cls, val, unit, unit_type=None):
"""Overrides __new__ since we are subclassing a Python primitive/"""
new = float.__new__(cls, val)
new._unit = Unit(unit)
new._unit_type = unit_type
return new
def __init__(self, val, unit, unit_type=None):
"""
Initializes a float with unit.
Args:
val (float): Value
unit (Unit): A unit. E.g., "C".
unit_type (str): A type of unit. E.g., "charge"
"""
if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
raise UnitError("{} is not a supported unit for {}".format(unit, unit_type))
self._unit = Unit(unit)
self._unit_type = unit_type
def __repr__(self):
return super().__repr__()
def __str__(self):
s = super().__str__()
return "{} {}".format(s, self._unit)
def __add__(self, other):
if not hasattr(other, "unit_type"):
return super().__add__(other)
if other.unit_type != self._unit_type:
raise UnitError("Adding different types of units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) + val, unit_type=self._unit_type, unit=self._unit)
def __sub__(self, other):
if not hasattr(other, "unit_type"):
return super().__sub__(other)
if other.unit_type != self._unit_type:
raise UnitError("Subtracting different units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) - val, unit_type=self._unit_type, unit=self._unit)
def __mul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None, unit=self._unit * other._unit)
def __rmul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None, unit=self._unit * other._unit)
def __pow__(self, i):
return FloatWithUnit(float(self) ** i, unit_type=None, unit=self._unit ** i)
def __truediv__(self, other):
val = super().__truediv__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(val, unit_type=None, unit=self._unit / other._unit)
def __neg__(self):
return FloatWithUnit(super().__neg__(), unit_type=self._unit_type, unit=self._unit)
def __getnewargs__(self):
"""Function used by pickle to recreate object."""
# print(self.__dict__)
# FIXME
# There's a problem with _unit_type if we try to unpickle objects from file.
# since self._unit_type might not be defined. I think this is due to
# the use of decorators (property and unitized). In particular I have problems with "amu"
# likely due to weight in core.composition
if hasattr(self, "_unit_type"):
args = float(self), self._unit, self._unit_type
else:
args = float(self), self._unit, None
return args
def __getstate__(self):
state = self.__dict__.copy()
state["val"] = float(self)
# print("in getstate %s" % state)
return state
def __setstate__(self, state):
# print("in setstate %s" % state)
self._unit = state["_unit"]
@property
def unit_type(self) -> str:
"""
:return: The type of unit. Energy, Charge, etc.
"""
return self._unit_type
@property
def unit(self) -> str:
"""
:return: The unit, e.g., "eV".
"""
return self._unit
def to(self, new_unit):
"""
Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
units of each type.
Args:
new_unit: New unit type.
Returns:
A FloatWithUnit object in the new units.
Example usage:
>>> e = Energy(1.1, "eV")
>>> e = Energy(1.1, "Ha")
>>> e.to("eV")
29.932522246 eV
"""
return FloatWithUnit(
self * self.unit.get_conversion_factor(new_unit),
unit_type=self._unit_type,
unit=new_unit,
)
@property
def as_base_units(self):
"""
Returns this FloatWithUnit in base SI units, including derived units.
Returns:
A FloatWithUnit object in base SI units
"""
return self.to(self.unit.as_base_units[0])
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
"""
Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
use the pre-defined unit type subclasses such as EnergyArray,
LengthArray, etc. instead of using ArrayWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity).
>>> a = EnergyArray([1, 2], "Ha")
>>> b = EnergyArray([1, 2], "eV")
>>> c = a + b
>>> print(c)
[ 1.03674933 2.07349865] Ha
>>> c.to("eV")
array([ 28.21138386, 56.42276772]) eV
"""
Error = UnitError
def __new__(cls, input_array, unit, unit_type=None):
"""
Override __new__.
"""
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attributes to the created instance
obj._unit = Unit(unit)
obj._unit_type = unit_type
return obj
def __array_finalize__(self, obj):
"""
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
comments.
"""
if obj is None:
return
self._unit = getattr(obj, "_unit", None)
self._unit_type = getattr(obj, "_unit_type", None)
@property
def unit_type(self) -> str:
"""
:return: The type of unit. Energy, Charge, etc.
"""
return self._unit_type
@property
def unit(self) -> str:
"""
:return: The unit, e.g., "eV".
"""
return self._unit
def __reduce__(self):
# print("in reduce")
reduce = list(super().__reduce__())
# print("unit",self._unit)
# print(reduce[2])
reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
return tuple(reduce)
def __setstate__(self, state):
# pylint: disable=E1101
super().__setstate__(state["np_state"])
self._unit = state["_unit"]
def __repr__(self):
return "{} {}".format(np.array(self).__repr__(), self.unit)
def __str__(self):
return "{} {}".format(np.array(self).__str__(), self.unit)
def __add__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Adding different types of units is" " not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) + np.array(other), unit_type=self.unit_type, unit=self.unit)
def __sub__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Subtracting different units is not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) - np.array(other), unit_type=self.unit_type, unit=self.unit)
def __mul__(self, other):
# FIXME
# Here we have the most important difference between FloatWithUnit and
# ArrayWithUnit:
# If other does not have units, I return an object with the same units
# as self.
# if other *has* units, I return an object *without* units since
# taking into account all the possible derived quantities would be
# too difficult.
# Moreover, Energy(1.0) * Time(1.0, "s") returns 1.0 Ha, which is a
# bit misleading.
# Same protocol for __div__
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__mul__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
# Cannot use super since it returns an instance of self.__class__
# while here we want a bare numpy array.
return self.__class__(np.array(self).__mul__(np.array(other)), unit=self.unit * other.unit)
def __rmul__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__rmul__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__rmul__(np.array(other)), unit=self.unit * other.unit)
def __div__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__div__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__div__(np.array(other)), unit=self.unit / other.unit)
def __truediv__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__truediv__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__truediv__(np.array(other)), unit=self.unit / other.unit)
def __neg__(self):
return self.__class__(np.array(self).__neg__(), unit_type=self.unit_type, unit=self.unit)
def to(self, new_unit):
"""
Conversion to a new_unit.
Args:
new_unit:
New unit type.
Returns:
An ArrayWithUnit object in the new units.
Example usage:
>>> e = EnergyArray([1, 1.1], "Ha")
>>> e.to("eV")
array([ 27.21138386, 29.93252225]) eV
"""
return self.__class__(
np.array(self) * self.unit.get_conversion_factor(new_unit),
unit_type=self.unit_type,
unit=new_unit,
)
@property
def as_base_units(self):
"""
Returns this ArrayWithUnit in base SI units, including derived units.
Returns:
An ArrayWithUnit object in base SI units
"""
return self.to(self.unit.as_base_units[0])
# TODO abstract base class property?
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return ALL_UNITS[self.unit_type]
# TODO abstract base class method?
def conversions(self):
"""
Returns a string showing the available conversions.
Useful tool in interactive mode.
"""
return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
"""
Partial returns a partial object and therefore we cannot inherit class
methods defined in FloatWithUnit. This function calls partial and patches
the new class before returning.
"""
newobj = partial(func, *args, **kwargs)
# monkey patch
newobj.from_string = FloatWithUnit.from_string
return newobj
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
"""
Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
objects with units if obj is a dict, else an instance of
`ArrayWithUnit`.
Args:
unit: Specific units (eV, Ha, m, ang, etc.).
"""
unit_type = _UNAME2UTYPE[unit]
if isinstance(obj, numbers.Number):
return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
if isinstance(obj, collections.abc.Mapping):
return {k: obj_with_unit(v, unit) for k, v in obj.items()}
return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
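# Illustrative sketch (not part of the original module): ``obj_with_unit``
# dispatches on the input type, as described in its docstring.
def _demo_obj_with_unit():
    scalar = obj_with_unit(1.0, "eV")            # -> FloatWithUnit
    mapping = obj_with_unit({"gap": 1.1}, "eV")  # -> dict of FloatWithUnit
    array = obj_with_unit([1.0, 2.0], "ang")     # -> ArrayWithUnit
    return scalar, mapping, array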
def unitized(unit):
"""
Useful decorator to assign units to the output of a function. You can also
use it to standardize the output units of a function that already returns
a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences
are assigned the same unit. It works with Python sequences only. The creation
of numpy arrays loses all unit information. For mapping types, the values
are assigned units.
Args:
unit: Specific unit (eV, Ha, m, ang, etc.).
Example usage::
@unitized(unit="kg")
def get_mass():
return 123.45
"""
def wrap(f):
def wrapped_f(*args, **kwargs):
val = f(*args, **kwargs)
unit_type = _UNAME2UTYPE[unit]
if isinstance(val, (FloatWithUnit, ArrayWithUnit)):
return val.to(unit)
if isinstance(val, collections.abc.Sequence):
# TODO: why don't we return a ArrayWithUnit?
# This complicated way is to ensure the sequence type is
# preserved (list or tuple).
return val.__class__([FloatWithUnit(i, unit_type=unit_type, unit=unit) for i in val])
if isinstance(val, collections.abc.Mapping):
for k, v in val.items():
val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)
elif isinstance(val, numbers.Number):
return FloatWithUnit(val, unit_type=unit_type, unit=unit)
elif val is None:
pass
else:
raise TypeError("Don't know how to assign units to %s" % str(val))
return val
return wrapped_f
return wrap
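# Illustrative sketch (not part of the original module): ``unitized`` applied
# to a plain function, following the docstring example above.
@unitized(unit="ang")
def _demo_bond_lengths():
    # Plain floats acquire Length units on return; the list type is preserved.
    return [1.09, 1.54]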
if __name__ == "__main__":
import doctest
doctest.testmod()
|
gmatteo/pymatgen
|
pymatgen/core/units.py
|
Python
|
mit
| 27,212
|
[
"pymatgen"
] |
e3d96db331097b2d8672faa13b38cee8b7412b8cc6b770767bbb82fe90bf4867
|
"""
Acceptance tests for Studio related to the container page.
The container page is used both for displaying units, and
for displaying containers within units.
"""
import datetime
from unittest import skip
import ddt
from nose.plugins.attrib import attr
from base_studio_test import ContainerBase
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.pages.studio.component_editor import ComponentEditorView, ComponentVisibilityEditorView
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.html_component_editor import HtmlComponentEditorView
from common.test.acceptance.pages.studio.move_xblock import MoveModalView
from common.test.acceptance.pages.studio.utils import add_discussion, drag
from common.test.acceptance.tests.helpers import create_user_partition_json
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
class NestedVerticalTest(ContainerBase):
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with nested verticals.
"""
self.container_title = ""
self.group_a = "Group A"
self.group_b = "Group B"
self.group_empty = "Group Empty"
self.group_a_item_1 = "Group A Item 1"
self.group_a_item_2 = "Group A Item 2"
self.group_b_item_1 = "Group B Item 1"
self.group_b_item_2 = "Group B Item 2"
self.group_a_handle = 0
self.group_a_item_1_handle = 1
self.group_a_item_2_handle = 2
self.group_empty_handle = 3
self.group_b_handle = 4
self.group_b_item_1_handle = 5
self.group_b_item_2_handle = 6
self.group_a_item_1_action_index = 0
self.group_a_item_2_action_index = 1
self.duplicate_label = "Duplicate of '{0}'"
self.discussion_label = "Discussion"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('vertical', 'Test Container').add_children(
XBlockFixtureDesc('vertical', 'Group A').add_children(
XBlockFixtureDesc('html', self.group_a_item_1),
XBlockFixtureDesc('html', self.group_a_item_2)
),
XBlockFixtureDesc('vertical', 'Group Empty'),
XBlockFixtureDesc('vertical', 'Group B').add_children(
XBlockFixtureDesc('html', self.group_b_item_1),
XBlockFixtureDesc('html', self.group_b_item_2)
)
)
)
)
)
)
@skip("Flaky: 01/16/2015")
@attr(shard=1)
class DragAndDropTest(NestedVerticalTest):
"""
Tests of reordering within the container page.
"""
def drag_and_verify(self, source, target, expected_ordering):
self.do_action_and_verify(
lambda (container): drag(container, source, target, 40),
expected_ordering
)
def test_reorder_in_group(self):
"""
Drag Group A Item 2 before Group A Item 1.
"""
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_2, self.group_a_item_1]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.drag_and_verify(self.group_a_item_2_handle, self.group_a_item_1_handle, expected_ordering)
def test_drag_to_top(self):
"""
Drag Group A Item 1 to top level (outside of Group A).
"""
expected_ordering = [{self.container_title: [self.group_a_item_1, self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.drag_and_verify(self.group_a_item_1_handle, self.group_a_handle, expected_ordering)
def test_drag_into_different_group(self):
"""
Drag Group B Item 1 into Group A (first element).
"""
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_2]},
{self.group_empty: []}]
self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering)
def test_drag_group_into_group(self):
"""
Drag Group B into Group A (first element).
"""
expected_ordering = [{self.container_title: [self.group_a, self.group_empty]},
{self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering)
def test_drag_after_addition(self):
"""
Add some components and then verify that drag and drop still works.
"""
group_a_menu = 0
def add_new_components_and_rearrange(container):
# Add a video component to Group 1
add_discussion(container, group_a_menu)
# Duplicate the first item in Group A
container.duplicate(self.group_a_item_1_action_index)
first_handle = self.group_a_item_1_handle
# Drag newly added video component to top.
drag(container, first_handle + 3, first_handle, 40)
# Drag duplicated component to top.
drag(container, first_handle + 2, first_handle, 40)
duplicate_label = self.duplicate_label.format(self.group_a_item_1)
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [duplicate_label, self.discussion_label, self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.do_action_and_verify(add_new_components_and_rearrange, expected_ordering)
@attr(shard=1)
class AddComponentTest(NestedVerticalTest):
"""
Tests of adding a component to the container page.
"""
def add_and_verify(self, menu_index, expected_ordering):
self.do_action_and_verify(
lambda (container): add_discussion(container, menu_index),
expected_ordering
)
def test_add_component_in_group(self):
group_b_menu = 2
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]},
{self.group_empty: []}]
self.add_and_verify(group_b_menu, expected_ordering)
def test_add_component_in_empty_group(self):
group_empty_menu = 1
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: [self.discussion_label]}]
self.add_and_verify(group_empty_menu, expected_ordering)
def test_add_component_in_container(self):
container_menu = 3
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.add_and_verify(container_menu, expected_ordering)
@attr(shard=1)
class DuplicateComponentTest(NestedVerticalTest):
"""
Tests of duplicating a component on the container page.
"""
def duplicate_and_verify(self, source_index, expected_ordering):
self.do_action_and_verify(
lambda (container): container.duplicate(source_index),
expected_ordering
)
def test_duplicate_first_in_group(self):
duplicate_label = self.duplicate_label.format(self.group_a_item_1)
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering)
def test_duplicate_second_in_group(self):
duplicate_label = self.duplicate_label.format(self.group_a_item_2)
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering)
def test_duplicate_the_duplicate(self):
first_duplicate_label = self.duplicate_label.format(self.group_a_item_1)
second_duplicate_label = self.duplicate_label.format(first_duplicate_label)
expected_ordering = [
{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}
]
def duplicate_twice(container):
container.duplicate(self.group_a_item_1_action_index)
container.duplicate(self.group_a_item_1_action_index + 1)
self.do_action_and_verify(duplicate_twice, expected_ordering)
@attr(shard=1)
class DeleteComponentTest(NestedVerticalTest):
"""
Tests of deleting a component from the container page.
"""
def delete_and_verify(self, source_index, expected_ordering):
self.do_action_and_verify(
lambda (container): container.delete(source_index),
expected_ordering
)
def test_delete_first_in_group(self):
expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
{self.group_a: [self.group_a_item_2]},
{self.group_b: [self.group_b_item_1, self.group_b_item_2]},
{self.group_empty: []}]
# Group A itself has a delete icon now, so item_1 is index 1 instead of 0.
group_a_item_1_delete_index = 1
self.delete_and_verify(group_a_item_1_delete_index, expected_ordering)
@attr(shard=1)
class EditContainerTest(NestedVerticalTest):
"""
Tests of editing a container.
"""
def modify_display_name_and_verify(self, component):
"""
Helper method for changing a display name.
"""
modified_name = 'modified'
self.assertNotEqual(component.name, modified_name)
component.edit()
component_editor = ComponentEditorView(self.browser, component.locator)
component_editor.set_field_value_and_save('Display Name', modified_name)
self.assertEqual(component.name, modified_name)
def test_edit_container_on_unit_page(self):
"""
Test the "edit" button on a container appearing on the unit page.
"""
unit = self.go_to_unit_page()
component = unit.xblocks[1]
self.modify_display_name_and_verify(component)
def test_edit_container_on_container_page(self):
"""
Test the "edit" button on a container appearing on the container page.
"""
container = self.go_to_nested_container_page()
self.modify_display_name_and_verify(container)
def test_edit_raw_html(self):
"""
Test the raw html editing functionality.
"""
modified_content = "<p>modified content</p>"
# Navigate to and open the component for editing.
unit = self.go_to_unit_page()
container = unit.xblocks[1].go_to_container()
component = container.xblocks[1].children[0]
component.edit()
html_editor = HtmlComponentEditorView(self.browser, component.locator)
html_editor.set_content_and_save(modified_content, raw=True)
# Note: we're expecting the <p> tags to have been removed.
self.assertEqual(component.student_content, "modified content")
class BaseGroupConfigurationsTest(ContainerBase):
ALL_LEARNERS_AND_STAFF = ComponentVisibilityEditorView.ALL_LEARNERS_AND_STAFF
CHOOSE_ONE = "Select a group type"
CONTENT_GROUP_PARTITION = ComponentVisibilityEditorView.CONTENT_GROUP_PARTITION
ENROLLMENT_TRACK_PARTITION = ComponentVisibilityEditorView.ENROLLMENT_TRACK_PARTITION
MISSING_GROUP_LABEL = 'Deleted Group\nThis group no longer exists. Choose another group or do not restrict access to this component.'
VALIDATION_ERROR_LABEL = 'This component has validation issues.'
VALIDATION_ERROR_MESSAGE = "Error:\nThis component's access settings refer to deleted or invalid groups."
GROUP_VISIBILITY_MESSAGE = 'Access to some content in this unit is restricted to specific groups of learners.'
def setUp(self):
super(BaseGroupConfigurationsTest, self).setUp()
# Set up a cohort-schemed user partition
self.id_base = MINIMUM_STATIC_PARTITION_ID
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
self.id_base,
self.CONTENT_GROUP_PARTITION,
'Content Group Partition',
[
Group(self.id_base + 1, 'Dogs'),
Group(self.id_base + 2, 'Cats')
],
scheme="cohort"
)
],
},
})
self.container_page = self.go_to_unit_page()
self.html_component = self.container_page.xblocks[1]
def populate_course_fixture(self, course_fixture):
"""
Populate a simple course with a section, subsection, unit, and HTML component.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Html Component')
)
)
)
)
def edit_component_visibility(self, component):
"""
Edit the visibility of an xblock on the container page.
"""
component.edit_visibility()
return ComponentVisibilityEditorView(self.browser, component.locator)
def verify_current_groups_message(self, visibility_editor, expected_current_groups):
"""
Check that the current visibility is displayed at the top of the dialog.
"""
if expected_current_groups == self.ALL_LEARNERS_AND_STAFF:
self.assertEqual("Access is not restricted", visibility_editor.current_groups_message)
else:
self.assertEqual(
"Access is restricted to: {groups}".format(groups=expected_current_groups),
visibility_editor.current_groups_message
)
def verify_selected_partition_scheme(self, visibility_editor, expected_scheme):
"""
Check that the expected partition scheme is selected.
"""
self.assertItemsEqual(expected_scheme, visibility_editor.selected_partition_scheme)
def verify_selected_groups(self, visibility_editor, expected_groups):
"""
Check the expected partition groups.
"""
self.assertItemsEqual(expected_groups, [group.text for group in visibility_editor.selected_groups])
def select_and_verify_saved(self, component, partition_label, groups=[]):
"""
Edit the visibility of an xblock on the container page and
verify that the edit persists. Note that `groups`
are labels which should be clicked, but not necessarily checked.
"""
# Make initial edit(s) and save
visibility_editor = self.edit_component_visibility(component)
visibility_editor.select_groups_in_partition_scheme(partition_label, groups)
# Re-open the modal and inspect its selected inputs. If no groups were selected,
# "All Learners" should be selected partitions scheme, and we show "Select a group type" in the select.
if not groups:
partition_label = self.CHOOSE_ONE
visibility_editor = self.edit_component_visibility(component)
self.verify_selected_partition_scheme(visibility_editor, partition_label)
self.verify_selected_groups(visibility_editor, groups)
visibility_editor.save()
def verify_component_validation_error(self, component):
"""
Verify that we see validation errors for the given component.
"""
self.assertTrue(component.has_validation_error)
self.assertEqual(component.validation_error_text, self.VALIDATION_ERROR_LABEL)
self.assertEqual([self.VALIDATION_ERROR_MESSAGE], component.validation_error_messages)
def verify_visibility_set(self, component, is_set):
"""
Verify that the container page shows that component visibility
settings have been edited if `is_set` is True; otherwise
verify that the container page shows no such information.
"""
if is_set:
self.assertIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message)
self.assertTrue(component.has_group_visibility_set)
else:
self.assertNotIn(self.GROUP_VISIBILITY_MESSAGE, self.container_page.sidebar_visibility_message)
self.assertFalse(component.has_group_visibility_set)
def update_component(self, component, metadata):
"""
Update a component's metadata and refresh the page.
"""
self.course_fixture._update_xblock(component.locator, {'metadata': metadata})
self.browser.refresh()
self.container_page.wait_for_page()
def remove_missing_groups(self, visibility_editor, component):
"""
Deselect the missing groups for a component. After save,
verify that there are no missing group messages in the modal
and that there is no validation error on the component.
"""
for option in visibility_editor.all_group_options:
if option.text == self.MISSING_GROUP_LABEL:
option.click()
visibility_editor.save()
visibility_editor = self.edit_component_visibility(component)
self.assertNotIn(self.MISSING_GROUP_LABEL, [item.text for item in visibility_editor.all_group_options])
visibility_editor.cancel()
self.assertFalse(component.has_validation_error)
@attr(shard=3)
class ContentGroupVisibilityModalTest(BaseGroupConfigurationsTest):
"""
Tests of the visibility settings modal for components on the unit
page (content groups).
"""
def test_default_selection(self):
"""
Scenario: The component visibility modal selects visible to all by default.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
Then the default visibility selection should be 'All Students and Staff'
And the container page should not display the content visibility warning
"""
visibility_dialog = self.edit_component_visibility(self.html_component)
self.verify_current_groups_message(visibility_dialog, self.ALL_LEARNERS_AND_STAFF)
self.verify_selected_partition_scheme(visibility_dialog, self.CHOOSE_ONE)
visibility_dialog.cancel()
self.verify_visibility_set(self.html_component, False)
def test_reset_to_all_students_and_staff(self):
"""
Scenario: The component visibility modal can be set to be visible to all students and staff.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Dogs'
And I save the modal
Then the container page should display the content visibility warning
And I re-open the visibility editor modal for that unit's component
And I select 'All Students and Staff'
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And the container page should not display the content visibility warning
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])
self.verify_visibility_set(self.html_component, True)
self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
self.verify_visibility_set(self.html_component, False)
def test_select_single_content_group(self):
"""
Scenario: The component visibility modal can be set to be visible to one content group.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Dogs'
And I save the modal
Then the visibility selection should be 'Dogs' and 'Specific Content Groups'
And the container page should display the content visibility warning
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])
self.verify_visibility_set(self.html_component, True)
def test_select_multiple_content_groups(self):
"""
Scenario: The component visibility modal can be set to be visible to multiple content groups.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Dogs' and 'Cats'
And I save the modal
Then the visibility selection should be 'Dogs', 'Cats', and 'Specific Content Groups'
And the container page should display the content visibility warning
"""
self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs', 'Cats'])
self.verify_visibility_set(self.html_component, True)
def test_select_zero_content_groups(self):
"""
Scenario: The component visibility modal can not be set to be visible to 'Specific Content Groups' without
selecting those specific groups.
Given I have a unit with one component
When I go to the container page for that unit
And I open the visibility editor modal for that unit's component
And I select 'Specific Content Groups'
And I save the modal
Then the visibility selection should be 'All Students and Staff'
And the container page should not display the content visibility warning
"""
self.select_and_verify_saved(
self.html_component, self.CONTENT_GROUP_PARTITION
)
self.verify_visibility_set(self.html_component, False)
def test_missing_groups(self):
"""
Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
group ids.
Given I have a unit with one component
And that component's group access specifies multiple invalid group ids
When I go to the container page for that unit
Then I should see a validation error message on that unit's component
And I open the visibility editor modal for that unit's component
Then I should see that I have selected multiple deleted groups
And the container page should display the content visibility warning
And I de-select the missing groups
And I save the modal
Then the visibility selection should be 'All Learners and Staff'
And I should not see any validation errors on the component
And the container page should not display the content visibility warning
"""
self.update_component(
self.html_component,
{'group_access': {self.id_base: [self.id_base + 3, self.id_base + 4]}}
)
self._verify_and_remove_missing_content_groups(
"Deleted Group, Deleted Group",
[self.MISSING_GROUP_LABEL] * 2
)
self.verify_visibility_set(self.html_component, False)
def test_found_and_missing_groups(self):
"""
Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
group ids and multiple known group ids.
Given I have a unit with one component
And that component's group access specifies multiple invalid and valid group ids
When I go to the container page for that unit
Then I should see a validation error message on that unit's component
And I open the visibility editor modal for that unit's component
Then I should see that I have selected multiple deleted groups
And the container page should display the content visibility warning
And I de-select the missing groups
And I save the modal
Then the visibility selection should be the names of the valid groups.
And I should not see any validation errors on the component
And the container page should display the content visibility warning
"""
self.update_component(
self.html_component,
{'group_access': {self.id_base: [self.id_base + 1, self.id_base + 2, self.id_base + 3, self.id_base + 4]}}
)
self._verify_and_remove_missing_content_groups(
'Dogs, Cats, Deleted Group, Deleted Group',
['Dogs', 'Cats'] + [self.MISSING_GROUP_LABEL] * 2
)
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
expected_groups = ['Dogs', 'Cats']
self.verify_current_groups_message(visibility_editor, ", ".join(expected_groups))
self.verify_selected_groups(visibility_editor, expected_groups)
self.verify_visibility_set(self.html_component, True)
def _verify_and_remove_missing_content_groups(self, current_groups_message, all_group_labels):
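"""
Verifies the component validation error, the current groups message, and the selected
group labels for a component with missing content groups, then removes the missing
groups via the visibility editor.
"""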
self.verify_component_validation_error(self.html_component)
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
self.verify_current_groups_message(visibility_editor, current_groups_message)
self.verify_selected_groups(visibility_editor, all_group_labels)
self.remove_missing_groups(visibility_editor, self.html_component)
@attr(shard=3)
class EnrollmentTrackVisibilityModalTest(BaseGroupConfigurationsTest):
"""
Tests of the visibility settings modal for components on the unit
page (enrollment tracks).
"""
AUDIT_TRACK = "Audit Track"
VERIFIED_TRACK = "Verified Track"
def setUp(self):
super(EnrollmentTrackVisibilityModalTest, self).setUp()
# Add an audit mode to the course
ModeCreationPage(self.browser, self.course_id, mode_slug=u'audit', mode_display_name=self.AUDIT_TRACK).visit()
# Add a verified mode to the course
ModeCreationPage(
self.browser, self.course_id, mode_slug=u'verified',
mode_display_name=self.VERIFIED_TRACK, min_price=10
).visit()
self.container_page = self.go_to_unit_page()
self.html_component = self.container_page.xblocks[1]
# Initially set visibility to Verified track.
self.update_component(
self.html_component,
{'group_access': {ENROLLMENT_TRACK_PARTITION_ID: [2]}} # "2" is Verified
)
def verify_component_group_visibility_message(self, component, expected_groups):
"""
Verifies that the group visibility message below the component display name is correct.
"""
if not expected_groups:
self.assertIsNone(component.get_partition_group_message)
else:
self.assertEqual("Access restricted to: " + expected_groups, component.get_partition_group_message)
def test_setting_enrollment_tracks(self):
"""
Test that enrollment track groups can be selected.
"""
# Verify that the "Verified" Group is shown on the unit page (under the unit display name).
self.verify_component_group_visibility_messsage(self.html_component, "Verified Track")
# Open dialog with "Verified" already selected.
visibility_editor = self.edit_component_visibility(self.html_component)
self.verify_current_groups_message(visibility_editor, self.VERIFIED_TRACK)
self.verify_selected_partition_scheme(
visibility_editor,
self.ENROLLMENT_TRACK_PARTITION
)
self.verify_selected_groups(visibility_editor, [self.VERIFIED_TRACK])
visibility_editor.cancel()
# Select "All Learners and Staff". The helper method saves the change,
# then reopens the dialog to verify that it was persisted.
self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
self.verify_component_group_visibility_message(self.html_component, None)
# Select "Audit" enrollment track. The helper method saves the change,
# then reopens the dialog to verify that it was persisted.
self.select_and_verify_saved(self.html_component, self.ENROLLMENT_TRACK_PARTITION, [self.AUDIT_TRACK])
self.verify_component_group_visibility_messsage(self.html_component, "Audit Track")
@attr(shard=1)
class UnitPublishingTest(ContainerBase):
"""
Tests of the publishing control and related widgets on the Unit page.
"""
PUBLISHED_STATUS = "Publishing Status\nPublished (not yet released)"
PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
LOCKED_STATUS = "Publishing Status\nVisible to Staff Only"
RELEASE_TITLE_RELEASED = "RELEASED:"
RELEASE_TITLE_RELEASE = "RELEASE:"
LAST_PUBLISHED = 'Last published'
LAST_SAVED = 'Draft saved on'
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with a unit and a single HTML child.
"""
self.html_content = '<p><strong>Body of HTML Unit.</strong></p>'
self.courseware = CoursewarePage(self.browser, self.course_id)
past_start_date = datetime.datetime(1974, 6, 22)
self.past_start_date_text = "Jun 22, 1974 at 00:00 UTC"
future_start_date = datetime.datetime(2100, 9, 13)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Test html', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unlocked Section',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
XBlockFixtureDesc('problem', '<problem></problem>', data=self.html_content)
)
)
),
XBlockFixtureDesc('chapter', 'Section With Locked Unit').add_children(
XBlockFixtureDesc(
'sequential',
'Subsection With Locked Unit',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc(
'vertical',
'Locked Unit',
metadata={'visible_to_staff_only': True}
).add_children(
XBlockFixtureDesc('discussion', '', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unreleased Section',
metadata={'start': future_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unreleased Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unreleased Unit')
)
)
)
def test_publishing(self):
"""
Scenario: The publish title changes based on whether or not draft content exists
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the title in the Publish information box is "Published and Live"
And the Publish button is disabled
And the last published text contains "Last published"
And the last saved text contains "Last published"
And when I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the last saved text contains "Draft saved on"
And the Publish button is enabled
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And the last published text contains "Last published"
And the last saved text contains "Last published"
"""
unit = self.go_to_unit_page()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
# Start date set in course fixture to 1970.
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, 'Jan 01, 1970 at 00:00 UTC\nwith Section "Test Section"'
)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
# Should not be able to click on Publish action -- but I don't know how to test that it is not clickable.
# TODO: continue discussion with Muhammad and Jay about this.
# Add a component to the page so it will have unpublished changes.
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_SAVED)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
def test_discard_changes(self):
"""
Scenario: The publish title changes after "Discard Changes" is clicked
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the Discard Changes button is disabled
And I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the Discard Changes button is enabled
And when I click the Discard Changes button
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.discard_changes()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
def test_view_live_no_changes(self):
"""
Scenario: "View Live" shows published content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the View Live button is enabled
And when I click on the View Live button
Then I see the published content in LMS
"""
unit = self.go_to_unit_page()
self._view_published_version(unit)
self._verify_components_visible(['html'])
def test_view_live_changes(self):
"""
Scenario: "View Live" does not show draft content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click on the View Live button
Then I see the published content in LMS
And I do not see the unpublished component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
self._view_published_version(unit)
self._verify_components_visible(['html'])
self.assertEqual(self.html_content, self.courseware.xblock_component_html_content(0))
def test_view_live_after_publish(self):
"""
Scenario: "View Live" shows newly published content
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click the Publish button
And when I click on the View Live button
Then I see the newly published component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
unit.publish_action.click()
self._view_published_version(unit)
self._verify_components_visible(['html', 'discussion'])
def test_initially_unlocked_visible_to_students(self):
"""
Scenario: An unlocked unit with release date in the past is visible to students
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
Then the unit has a warning that it is visible to students
And it is marked as "RELEASED" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, self.past_start_date_text + '\n' + 'with Section "Unlocked Section"'
)
self._view_published_version(unit)
self._verify_student_view_visible(['problem'])
def test_locked_visible_to_staff_only(self):
"""
Scenario: After locking a unit with release date in the past, it is only visible to staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I select "Hide from students"
Then the unit does not have a warning that it is visible to students
And the unit does not display inherited staff lock
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
checked = unit.toggle_staff_lock()
self.assertTrue(checked)
self.assertFalse(unit.currently_visible_to_students)
self.assertFalse(unit.shows_inherited_staff_lock())
unit.verify_publish_title(self.LOCKED_STATUS)
self._view_published_version(unit)
# Will initially be in staff view, locked component should be visible.
self._verify_components_visible(['problem'])
# Switch to student view and verify not visible
self._verify_student_view_locked()
def test_initially_locked_not_visible_to_students(self):
"""
Scenario: A locked unit with release date in the past is not visible to students
Given I have a published locked unit with release date in the past
When I go to the unit page in Studio
Then the unit does not have a warning that it is visible to students
And it is marked as "RELEASE" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
unit.verify_publish_title(self.LOCKED_STATUS)
self.assertFalse(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASE,
self.past_start_date_text + '\n' + 'with Subsection "Subsection With Locked Unit"'
)
self._view_published_version(unit)
self._verify_student_view_locked()
def test_unlocked_visible_to_all(self):
"""
Scenario: After unlocking a unit with release date in the past, it is visible to both students and staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I deselect "Hide from students"
Then the unit does have a warning that it is visible to students
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
checked = unit.toggle_staff_lock()
self.assertFalse(checked)
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._view_published_version(unit)
# Will initially be in staff view, components always visible.
self._verify_components_visible(['discussion'])
# Switch to student view and verify visible.
self._verify_student_view_visible(['discussion'])
def test_explicit_lock_overrides_implicit_subsection_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a subsection
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
subsection = self.outline.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
subsection.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_explicit_lock_overrides_implicit_section_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited section staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a section
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
section = self.outline.section_at(0)
unit = section.subsection_at(0).unit_at(0)
section.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_published_unit_with_draft_child(self):
"""
Scenario: A published unit with a draft child can be published
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And edit the content of the only component
Then the content changes
And the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And when I click the View Live button
Then I see the changed content in LMS
"""
modified_content = 'modified content'
unit = self.go_to_unit_page()
component = unit.xblocks[1]
component.edit()
HtmlComponentEditorView(self.browser, component.locator).set_content_and_save(modified_content)
self.assertEqual(component.student_content, modified_content)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._view_published_version(unit)
self.assertIn(modified_content, self.courseware.xblock_component_html_content(0))
def test_cancel_does_not_create_draft(self):
"""
Scenario: Editing a component and then canceling does not create a draft version (TNL-399)
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And edit the content of an HTML component and then press cancel
Then the content does not change
And the title in the Publish information box is "Published and Live"
And when I reload the page
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
component = unit.xblocks[1]
component.edit()
HtmlComponentEditorView(self.browser, component.locator).set_content_and_cancel("modified content")
self.assertEqual(component.student_content, "Body of HTML Unit.")
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.browser.refresh()
unit.wait_for_page()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
def test_delete_child_in_published_unit(self):
"""
Scenario: A published unit can be published again after deleting a child
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And delete the only component
Then the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And when I click the View Live button
Then I see an empty unit in LMS
"""
unit = self.go_to_unit_page()
unit.delete(0)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self._view_published_version(unit)
self.assertEqual(0, self.courseware.num_xblock_components)
def test_published_not_live(self):
"""
Scenario: The publish title displays correctly for units that are not live
Given I have a published unit with no unpublished changes that releases in the future
When I go to the unit page in Studio
Then the title in the Publish information box is "Published (not yet released)"
And when I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published (not yet released)"
"""
unit = self.go_to_unit_page('Unreleased Section', 'Unreleased Subsection', 'Unreleased Unit')
unit.verify_publish_title(self.PUBLISHED_STATUS)
add_discussion(unit)
unit.verify_publish_title(self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
unit.verify_publish_title(self.PUBLISHED_STATUS)
def _view_published_version(self, unit):
"""
Goes to the published version, then waits for the browser to load the page.
"""
unit.view_published_version()
self.assertEqual(len(self.browser.window_handles), 2)
self.courseware.wait_for_page()
def _verify_and_return_staff_page(self):
"""
Verifies that the browser is on the staff page and returns a StaffCoursewarePage.
"""
page = StaffCoursewarePage(self.browser, self.course_id)
page.wait_for_page()
return page
def _verify_student_view_locked(self):
"""
Verifies no component is visible when viewing as a student.
"""
self._verify_and_return_staff_page().set_staff_view_mode('Learner')
self.assertEqual(0, self.courseware.num_xblock_components)
def _verify_student_view_visible(self, expected_components):
"""
Verifies expected components are visible when viewing as a student.
"""
self._verify_and_return_staff_page().set_staff_view_mode('Learner')
self._verify_components_visible(expected_components)
def _verify_components_visible(self, expected_components):
"""
Verifies the expected components are visible (and there are no extras).
"""
self.assertEqual(len(expected_components), self.courseware.num_xblock_components)
for index, component in enumerate(expected_components):
self.assertEqual(component, self.courseware.xblock_component_type(index))
def _verify_release_date_info(self, unit, expected_title, expected_date):
"""
Verifies how the release date is displayed in the publishing sidebar.
"""
self.assertEqual(expected_title, unit.release_title)
self.assertEqual(expected_date, unit.release_date)
def _verify_last_published_and_saved(self, unit, expected_published_prefix, expected_saved_prefix):
"""
Verifies that last published and last saved messages respectively contain the given strings.
"""
self.assertIn(expected_published_prefix, unit.last_published_text)
self.assertIn(expected_saved_prefix, unit.last_saved_text)
def _verify_explicit_lock_overrides_implicit_lock_information(self, unit_page):
"""
Verifies that a unit with inherited staff lock does not display inherited information when explicitly locked.
"""
self.assertTrue(unit_page.shows_inherited_staff_lock())
unit_page.toggle_staff_lock(inherits_staff_lock=True)
self.assertFalse(unit_page.shows_inherited_staff_lock())
unit_page.toggle_staff_lock(inherits_staff_lock=True)
self.assertTrue(unit_page.shows_inherited_staff_lock())
# TODO: need to work with Jay/Christine to get testing of "Preview" working.
# def test_preview(self):
# unit = self.go_to_unit_page()
# add_discussion(unit)
# unit.preview()
# self.assertEqual(2, self.courseware.num_xblock_components)
# self.assertEqual('html', self.courseware.xblock_component_type(0))
# self.assertEqual('discussion', self.courseware.xblock_component_type(1))
@attr(shard=3)
class DisplayNameTest(ContainerBase):
"""
Test consistent use of display_name_with_default
"""
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with nested verticals.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('vertical', None)
)
)
)
)
def test_display_name_default(self):
"""
Scenario: Given that an XBlock with a dynamic display name has been added to the course,
When I view the unit page and note the display name of the block,
Then I see the dynamically generated display name,
And when I then go to the container page for that same block,
Then I see the same generated display name.
"""
# Unfortunately no blocks in the core platform implement display_name_with_default
# in an interesting way for this test, so we are just testing for consistency and not
# the actual value.
unit = self.go_to_unit_page()
test_block = unit.xblocks[1]
title_on_unit_page = test_block.name
container = test_block.go_to_container()
self.assertEqual(container.name, title_on_unit_page)
@attr(shard=3)
class ProblemCategoryTabsTest(ContainerBase):
"""
Test to verify tabs in problem category.
"""
def setUp(self, is_staff=True):
super(ProblemCategoryTabsTest, self).setUp(is_staff=is_staff)
def populate_course_fixture(self, course_fixture):
"""
Sets up course structure.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def test_correct_tabs_present(self):
"""
Scenario: Verify that correct tabs are present in problem category.
Given I am a staff user
When I go to the unit page
Then I see only the `Common Problem Types` and `Advanced` tabs in the `problem` category
"""
self.go_to_unit_page()
page = ContainerPage(self.browser, None)
self.assertEqual(page.get_category_tab_names('problem'), ['Common Problem Types', 'Advanced'])
def test_common_problem_types_tab(self):
"""
Scenario: Verify that correct components are present in Common Problem Types tab.
Given I am a staff user
When I go to the unit page
Then I see the correct components under the `Common Problem Types` tab in the `problem` category
"""
self.go_to_unit_page()
page = ContainerPage(self.browser, None)
expected_components = [
"Blank Common Problem",
"Checkboxes",
"Dropdown",
"Multiple Choice",
"Numerical Input",
"Text Input",
"Checkboxes with Hints and Feedback",
"Dropdown with Hints and Feedback",
"Multiple Choice with Hints and Feedback",
"Numerical Input with Hints and Feedback",
"Text Input with Hints and Feedback",
]
self.assertEqual(page.get_category_tab_components('problem', 1), expected_components)
@attr(shard=1)
@ddt.ddt
class MoveComponentTest(ContainerBase):
"""
Tests of moving an XBlock to another XBlock.
"""
PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
def setUp(self, is_staff=True):
super(MoveComponentTest, self).setUp(is_staff=is_staff)
self.container = ContainerPage(self.browser, None)
self.move_modal_view = MoveModalView(self.browser)
self.navigation_options = {
'section': 0,
'subsection': 0,
'unit': 1,
}
self.source_component_display_name = 'HTML 11'
self.source_xblock_category = 'component'
self.message_move = 'Success! "{display_name}" has been moved.'
self.message_undo = 'Move cancelled. "{display_name}" has been moved back to its original location.'
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure.
"""
# pylint: disable=attribute-defined-outside-init
self.unit_page1 = XBlockFixtureDesc('vertical', 'Test Unit 1').add_children(
XBlockFixtureDesc('html', 'HTML 11'),
XBlockFixtureDesc('html', 'HTML 12')
)
self.unit_page2 = XBlockFixtureDesc('vertical', 'Test Unit 2').add_children(
XBlockFixtureDesc('html', 'HTML 21'),
XBlockFixtureDesc('html', 'HTML 22')
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
self.unit_page1,
self.unit_page2
)
)
)
def verify_move_operations(self, unit_page, source_component, operation, component_display_names_after_operation,
should_verify_publish_title=True):
"""
Verify move operations.
Arguments:
unit_page (ContainerPage): Unit container page.
source_component (XBlock): Source XBlock to be moved.
operation (str): Either `move` or `undo_move`.
component_display_names_after_operation (list): Display names of components in the source/destination unit after the operation.
should_verify_publish_title (bool): Whether to verify the publish title. Defaults to True.
"""
source_component.open_move_modal()
self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
# Verify unit is in published state before move operation
if should_verify_publish_title:
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.move_modal_view.click_move_button()
self.container.verify_confirmation_message(
self.message_move.format(display_name=self.source_component_display_name)
)
self.assertEqual(len(unit_page.displayed_children), 1)
# Verify unit in draft state now
if should_verify_publish_title:
self.container.verify_publish_title(self.DRAFT_STATUS)
if operation == 'move':
self.container.click_take_me_there_link()
elif operation == 'undo_move':
self.container.click_undo_move_link()
self.container.verify_confirmation_message(
self.message_undo.format(display_name=self.source_component_display_name)
)
unit_page = ContainerPage(self.browser, None)
components = unit_page.displayed_children
self.assertEqual(
[component.name for component in components],
component_display_names_after_operation
)
def verify_state_change(self, unit_page, operation):
"""
Verify that after state change, confirmation message is hidden.
Arguments:
unit_page (Object) Unit container page.
operation (String) Publish or discard changes operation.
"""
# Verify unit in draft state now
self.container.verify_publish_title(self.DRAFT_STATUS)
# Now click publish/discard button
if operation == 'publish':
unit_page.publish_action.click()
else:
unit_page.discard_changes()
# Now verify success message is hidden
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.container.verify_confirmation_message(
message=self.message_move.format(display_name=self.source_component_display_name),
verify_hidden=True
)
def test_move_component_successfully(self):
"""
Test that we can move a component successfully.
Given I am a staff user
And I go to the unit page in the first section
And I open the move modal
And I navigate to the unit in the second section
And I see the move button is enabled
When I click on the move button
Then I see the move operation success message
And when I click on the 'take me there' link
Then I see the moved component there.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
self.verify_move_operations(
unit_page=unit_page,
source_component=components[0],
operation='move',
component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 11']
)
def test_undo_move_component_successfully(self):
"""
Test that we can undo moving a component successfully.
Given I am a staff user
And I go to the unit page in the first section
And I open the move modal
When I click on the move button
Then I see the move operation success message
And when I click on the undo move link
Then I see that the undo move operation is successful
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
self.verify_move_operations(
unit_page=unit_page,
source_component=components[0],
operation='undo_move',
component_display_names_after_operation=['HTML 11', 'HTML 12']
)
@ddt.data('publish', 'discard')
def test_publish_discard_changes_after_move(self, operation):
"""
Test that the success banner is hidden when we discard changes or publish the unit after a move operation.
Given I am a staff user
And I go to the unit page in the first section
And I open the move modal
And I navigate to the unit in the second section
And I see the move button is enabled
When I click on the move button
Then I see the move operation success message
And when I click on the publish or discard changes button
Then I see that the move operation success message is hidden.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
components = unit_page.displayed_children
self.assertEqual(len(components), 2)
components[0].open_move_modal()
self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
# Verify unit is in published state before move operation
self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
self.move_modal_view.click_move_button()
self.container.verify_confirmation_message(
self.message_move.format(display_name=self.source_component_display_name)
)
self.assertEqual(len(unit_page.displayed_children), 1)
self.verify_state_change(unit_page, operation)
def test_content_experiment(self):
"""
Test that we can move a component of a content experiment successfully.
Given that I am a staff user
And I go to the content experiment page
And I open the move modal
When I navigate to the unit in the second section
Then I see the move button is enabled
And when I click on the move button
Then I see the move operation success message
And when I click on the 'take me there' link
Then I see the moved component there
And when I undo moving a component
Then I see the undo move success message
"""
# Add content experiment support to course.
self.course_fixture.add_advanced_settings({
u'advanced_modules': {'value': ['split_test']},
})
# Create group configurations
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
'metadata': {
u'user_partitions': [
create_user_partition_json(
0,
'Test Group Configuration',
'Description of the group configuration.',
[Group('0', 'Group A'), Group('1', 'Group B')]
),
],
},
})
# Add split test to unit_page1 and assign newly created group configuration to it
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
self.course_fixture.create_xblock(self.unit_page1.locator, split_test)
# Visit content experiment container page.
unit_page = ContainerPage(self.browser, split_test.locator)
unit_page.visit()
group_a_locator = unit_page.displayed_children[0].locator
# Add some components to Group A.
self.course_fixture.create_xblock(
group_a_locator, XBlockFixtureDesc('html', 'HTML 311')
)
self.course_fixture.create_xblock(
group_a_locator, XBlockFixtureDesc('html', 'HTML 312')
)
# Go to the group page to move its component.
group_container_page = ContainerPage(self.browser, group_a_locator)
group_container_page.visit()
# Verify content experiment block has correct groups and components.
components = group_container_page.displayed_children
self.assertEqual(len(components), 2)
self.source_component_display_name = 'HTML 311'
# Verify undo move operation for content experiment.
self.verify_move_operations(
unit_page=group_container_page,
source_component=components[0],
operation='undo_move',
component_display_names_after_operation=['HTML 311', 'HTML 312'],
should_verify_publish_title=False
)
# Verify move operation for content experiment.
self.verify_move_operations(
unit_page=group_container_page,
source_component=components[0],
operation='move',
component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 311'],
should_verify_publish_title=False
)
# Ideally this test would be decorated with @attr('a11y') so that it runs in the a11y jenkins job.
# But for some reason it always fails in the a11y jenkins job, while it always passes locally on devstack
# as well as in the bokchoy jenkins job. For this reason, the test is marked to run under the bokchoy jenkins job.
def test_a11y(self):
"""
Verify move modal a11y.
"""
unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
unit_page.a11y_audit.config.set_scope(
include=[".modal-window.move-modal"]
)
unit_page.a11y_audit.config.set_rules({
'ignore': [
'color-contrast', # TODO: AC-716
'link-href', # TODO: AC-716
]
})
unit_page.displayed_children[0].open_move_modal()
for category in ['section', 'subsection', 'component']:
self.move_modal_view.navigate_to_category(category, self.navigation_options)
unit_page.a11y_audit.check_for_accessibility_errors()
| fintech-circle/edx-platform | common/test/acceptance/tests/studio/test_studio_container.py | Python | agpl-3.0 | 70,273 | ["VisIt"] | 71bd5327deed7e47e2a9921765d3471ef0ee95aeeeb42b18ff2e90dc5c486bab |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAbsseq(RPackage):
"""Inferring differential expression genes by absolute counts
difference between two groups, utilizing Negative binomial
distribution and moderating fold-change according to heterogeneity
of dispersion across expression level."""
homepage = "https://www.bioconductor.org/packages/ABSSeq/"
git = "https://git.bioconductor.org/packages/ABSSeq.git"
version('1.22.8', commit='a67ba49bc156a4522092519644f3ec83d58ebd6a')
depends_on('r@3.4.0:3.4.9', when='@1.22.8')
depends_on('r-locfit', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
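# A minimal usage sketch (assuming a working Spack installation; exact spec
# syntax may vary by Spack version):
#
#   $ spack install r-absseq    # build and install the package
#   $ spack load r-absseq       # make the R library available in the environment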
| mfherbst/spack | var/spack/repos/builtin/packages/r-absseq/package.py | Python | lgpl-2.1 | 1,880 | ["Bioconductor"] | 1a362e76df09783f1d20ca1cd3319a721f1925c9f2f260476da74407ad82debf |
# This file is part of cclib (http://cclib.sf.net), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
__revision__ = "$Revision$"
import re
import numpy
from . import logfileparser
from . import utils
class Jaguar(logfileparser.Logfile):
"""A Jaguar output file"""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(Jaguar, self).__init__(logname="Jaguar", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "Jaguar output file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'Jaguar("%s")' % (self.filename)
def normalisesym(self, label):
"""Normalise the symmetries used by Jaguar.
To normalise, three rules need to be applied:
(1) To handle orbitals of E symmetry, retain everything before the /
(2) Replace two p's by "
(3) Replace any remaining single p's by '
>>> t = Jaguar("dummyfile").normalisesym
>>> labels = ['A', 'A1', 'Ag', 'Ap', 'App', "A1p", "A1pp", "E1pp/Ap"]
>>> answers = map(t, labels)
>>> print answers
['A', 'A1', 'Ag', "A'", 'A"', "A1'", 'A1"', 'E1"']
"""
ans = label.split("/")[0].replace("pp", '"').replace("p", "'")
return ans
def before_parsing(self):
self.geoopt = False # Is this a GeoOpt? Needed for SCF targets/values.
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if line[0:4] == "etot":
# Get SCF convergence information
if not hasattr(self, "scfvalues"):
self.scfvalues = []
self.scftargets = [[5E-5, 5E-6]]
values = []
while line[0:4] == "etot":
# Jaguar 4.2
# etot 1 N N 0 N -382.08751886450 2.3E-03 1.4E-01
# etot 2 Y Y 0 N -382.27486023153 1.9E-01 1.4E-03 5.7E-02
# Jaguar 6.5
# etot 1 N N 0 N -382.08751881733 2.3E-03 1.4E-01
# etot 2 Y Y 0 N -382.27486018708 1.9E-01 1.4E-03 5.7E-02
temp = line.split()[7:]
if len(temp)==3:
denergy = float(temp[0])
else:
denergy = 0 # Should really be greater than target value
# or should we just ignore the values in this line
ddensity = float(temp[-2])
maxdiiserr = float(temp[-1])
if not self.geoopt:
values.append([denergy, ddensity])
else:
values.append([ddensity])
line = next(inputfile)
self.scfvalues.append(values)
# Hartree-Fock energy after SCF
if line[1:18] == "SCFE: SCF energy:":
if not hasattr(self, "scfenergies"):
self.scfenergies = []
temp = line.strip().split()
scfenergy = float(temp[temp.index("hartrees") - 1])
scfenergy = utils.convertor(scfenergy, "hartree", "eV")
self.scfenergies.append(scfenergy)
# Energy after LMP2 correction
if line[1:18] == "Total LMP2 Energy":
if not hasattr(self, "mpenergies"):
self.mpenergies = [[]]
lmp2energy = float(line.split()[-1])
lmp2energy = utils.convertor(lmp2energy, "hartree", "eV")
self.mpenergies[-1].append(lmp2energy)
if line[2:14] == "new geometry" or line[1:21] == "Symmetrized geometry" or line.find("Input geometry") > 0:
# Get the atom coordinates
if not hasattr(self, "atomcoords") or line[1:21] == "Symmetrized geometry":
# Wipe the "Input geometry" if "Symmetrized geometry" present
self.atomcoords = []
p = re.compile("(\D+)\d+") # One/more letters followed by a number
atomcoords = []
atomnos = []
angstrom = next(inputfile)
title = next(inputfile)
line = next(inputfile)
while line.strip():
temp = line.split()
element = p.findall(temp[0])[0]
atomnos.append(self.table.number[element])
atomcoords.append(list(map(float, temp[1:])))
line = next(inputfile)
self.atomcoords.append(atomcoords)
self.atomnos = numpy.array(atomnos, "i")
self.natom = len(atomcoords)
# Extract charge and multiplicity
if line[2:22] == "net molecular charge":
self.charge = int(line.split()[-1])
self.mult = int(next(inputfile).split()[-1])
if line[2:24] == "start of program geopt":
if not self.geoopt:
# Need to keep only the RMS density change info
# if this is a geoopt
self.scftargets = [[self.scftargets[0][0]]]
if hasattr(self, "scfvalues"):
self.scfvalues[0] = [[x[0]] for x in self.scfvalues[0]]
self.geoopt = True
else:
self.scftargets.append([5E-5])
if line[2:28] == "geometry optimization step":
# Get Geometry Opt convergence information
if not hasattr(self, "geovalues"):
self.geovalues = []
self.geotargets = numpy.zeros(5, "d")
gopt_step = int(line.split()[-1])
energy = next(inputfile)
# quick hack for messages of the sort:
# ** restarting optimization from step 2 **
# as found in regression file ptnh3_2_H2O_2_2plus.out
if next(inputfile).strip():
blank = next(inputfile)
line = next(inputfile)
values = []
target_index = 0
if gopt_step == 1:
# The first optimization step does not produce an energy change
values.append(0.0)
target_index = 1
while line.strip():
if len(line) > 40 and line[41] == "(":
# A new geo convergence value
values.append(float(line[26:37]))
self.geotargets[target_index] = float(line[43:54])
target_index += 1
line = next(inputfile)
self.geovalues.append(values)
if line.find("number of occupied orbitals") > 0:
# Get number of MOs
occs = int(line.split()[-1])
line = next(inputfile)
virts = int(line.split()[-1])
self.nmo = occs + virts
self.homos = numpy.array([occs-1], "i")
self.unrestrictedflag = False
if line.find("number of alpha occupied orb") > 0:
# Get number of MOs for an unrestricted calc
aoccs = int(line.split()[-1])
line = next(inputfile)
avirts = int(line.split()[-1])
line = next(inputfile)
boccs = int(line.split()[-1])
line = next(inputfile)
bvirt = int(line.split()[-1])
self.nmo = aoccs + avirts
self.homos = numpy.array([aoccs-1, boccs-1], "i")
self.unrestrictedflag = True
# MO energies and symmetries.
# Jaguar 7.0: provides energies and symmetries for both
# restricted and unrestricted calculations, like this:
# Alpha Orbital energies/symmetry label:
# -10.25358 Bu -10.25353 Ag -10.21931 Bu -10.21927 Ag
# -10.21792 Bu -10.21782 Ag -10.21773 Bu -10.21772 Ag
# ...
# Jaguar 6.5: prints both only for restricted calculations,
# so for unrestricted calculations the output looks like this:
# Alpha Orbital energies:
# -10.25358 -10.25353 -10.21931 -10.21927 -10.21792 -10.21782
# -10.21773 -10.21772 -10.21537 -10.21537 -1.02078 -0.96193
# ...
# Presence of 'Orbital energies' is enough to catch all versions.
if "Orbital energies" in line:
# Parsing results is identical for restricted/unrestricted
# calculations, just assert later that alpha/beta order is OK.
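# spin is 0 for alpha (and restricted) orbitals and 1 for beta orbitals.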
spin = int(line[2:6] == "Beta")
# Check if symmetries are printed also.
issyms = "symmetry label" in line
if not hasattr(self, "moenergies"):
self.moenergies = []
if issyms and not hasattr(self, "mosyms"):
self.mosyms = []
# Grow moenergies/mosyms and make sure they are empty when
# parsed multiple times - currently cclib returns only
# the final output (e.g. in a geometry optimization).
if len(self.moenergies) < spin+1:
self.moenergies.append([])
self.moenergies[spin] = []
if issyms:
if len(self.mosyms) < spin+1:
self.mosyms.append([])
self.mosyms[spin] = []
line = next(inputfile).split()
while len(line) > 0:
if issyms:
energies = [float(line[2*i]) for i in range(len(line)//2)]
syms = [line[2*i+1] for i in range(len(line)//2)]
else:
energies = [float(e) for e in line]
energies = [utils.convertor(e, "hartree", "eV") for e in energies]
self.moenergies[spin].extend(energies)
if issyms:
syms = [self.normalisesym(s) for s in syms]
self.mosyms[spin].extend(syms)
line = next(inputfile).split()
# There should always be an extra blank line after all this.
line = next(inputfile)
if line.find("Occupied + virtual Orbitals- final wvfn") > 0:
blank = next(inputfile)
stars = next(inputfile)
blank = next(inputfile)
blank = next(inputfile)
if not hasattr(self,"mocoeffs"):
if self.unrestrictedflag:
spin = 2
else:
spin = 1
self.mocoeffs = []
aonames = []
lastatom = "X"
readatombasis = False
if not hasattr(self, "atombasis"):
self.atombasis = []
for i in range(self.natom):
self.atombasis.append([])
readatombasis = True
offset = 0
for s in range(spin):
mocoeffs = numpy.zeros((len(self.moenergies[s]), self.nbasis), "d")
if s == 1: #beta case
stars = next(inputfile)
blank = next(inputfile)
title = next(inputfile)
blank = next(inputfile)
stars = next(inputfile)
blank = next(inputfile)
blank = next(inputfile)
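# Jaguar prints MO coefficients in blocks of five orbitals per pass,
# so iterate over the orbitals in steps of five.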
for k in range(0,len(self.moenergies[s]),5):
if self.progress:
self.updateprogress(inputfile, "Coefficients")
numbers = next(inputfile)
eigens = next(inputfile)
line = next(inputfile)
for i in range(self.nbasis):
info = line.split()
# Fill atombasis only first time around.
if readatombasis and k == 0:
orbno = int(info[0])
atom = info[1]
if atom[1].isalpha():
atomno = int(atom[2:])
else:
atomno = int(atom[1:])
self.atombasis[atomno-1].append(orbno-1)
if not hasattr(self,"aonames"):
if lastatom != info[1]:
scount = 1
pcount = 3
dcount = 6 #six d orbitals in Jaguar
if info[2] == 'S':
aonames.append("%s_%i%s"%(info[1], scount, info[2]))
scount += 1
if info[2] == 'X' or info[2] == 'Y' or info[2] == 'Z':
aonames.append("%s_%iP%s"%(info[1], pcount / 3, info[2]))
pcount += 1
if info[2] == 'XX' or info[2] == 'YY' or info[2] == 'ZZ' or \
info[2] == 'XY' or info[2] == 'XZ' or info[2] == 'YZ':
aonames.append("%s_%iD%s"%(info[1], dcount / 6, info[2]))
dcount += 1
lastatom = info[1]
for j in range(len(info[3:])):
mocoeffs[j+k, i] = float(info[3+j])
line = next(inputfile)
if not hasattr(self,"aonames"):
self.aonames = aonames
offset += 5
self.mocoeffs.append(mocoeffs)
if line[2:6] == "olap":
if line[6] == "-":
return
# This was continue (in loop) before parser refactoring.
# continue # avoid "olap-dev"
self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
for i in range(0, self.nbasis, 5):
if self.progress:
self.updateprogress(inputfile, "Overlap")
blank = next(inputfile)
header = next(inputfile)
for j in range(i, self.nbasis):
temp = list(map(float, next(inputfile).split()[1:]))
self.aooverlaps[j, i:(i+len(temp))] = temp
self.aooverlaps[i:(i+len(temp)), j] = temp
if line[1:28] == "number of occupied orbitals":
self.homos = numpy.array([float(line.strip().split()[-1])-1], "i")
if line[2:27] == "number of basis functions":
self.nbasis = int(line.strip().split()[-1])
# IR output looks like this:
# frequencies 72.45 113.25 176.88 183.76 267.60 312.06
# symmetries Au Bg Au Bu Ag Bg
# intensities 0.07 0.00 0.28 0.52 0.00 0.00
# reduc. mass 1.90 0.74 1.06 1.42 1.19 0.85
# force const 0.01 0.01 0.02 0.03 0.05 0.05
# C1 X 0.00000 0.00000 0.00000 -0.05707 -0.06716 0.00000
# C1 Y 0.00000 0.00000 0.00000 0.00909 -0.02529 0.00000
# C1 Z 0.04792 -0.06032 -0.01192 0.00000 0.00000 0.11613
# C2 X 0.00000 0.00000 0.00000 -0.06094 -0.04635 0.00000
# ... etc. ...
# This is a complete output; some files will not have intensities,
# and older Jaguar versions sometimes skip the symmetries.
if line[2:23] == "start of program freq":
self.vibfreqs = []
self.vibdisps = []
forceconstants = False
intensities = False
blank = next(inputfile)
line = next(inputfile)
while line.strip():
if "force const" in line:
forceconstants = True
if "intensities" in line:
intensities = True
line = next(inputfile)
freqs = next(inputfile)
# The last block has an extra blank line after it - catch it.
while freqs.strip():
# Number of modes (columns printed in this block).
nmodes = len(freqs.split())-1
# Append the frequencies.
self.vibfreqs.extend(list(map(float, freqs.split()[1:])))
line = next(inputfile).split()
# May skip symmetries (older Jaguar versions).
if line[0] == "symmetries":
if not hasattr(self, "vibsyms"):
self.vibsyms = []
self.vibsyms.extend(list(map(self.normalisesym, line[1:])))
line = next(inputfile).split()
if intensities:
if not hasattr(self, "vibirs"):
self.vibirs = []
self.vibirs.extend(list(map(float, line[1:])))
line = next(inputfile).split()
if forceconstants:
line = next(inputfile)
# Start parsing the displacements.
# Variable 'q' holds up to 7 lists of triplets.
q = [ [] for i in range(7) ]
for n in range(self.natom):
# Variable 'p' holds up to 7 triplets.
p = [ [] for i in range(7) ]
for i in range(3):
line = next(inputfile)
disps = [float(disp) for disp in line.split()[2:]]
for j in range(nmodes):
p[j].append(disps[j])
for i in range(nmodes):
q[i].append(p[i])
self.vibdisps.extend(q[:nmodes])
blank = next(inputfile)
freqs = next(inputfile)
# Convert new data to arrays.
self.vibfreqs = numpy.array(self.vibfreqs, "d")
self.vibdisps = numpy.array(self.vibdisps, "d")
if hasattr(self, "vibirs"):
self.vibirs = numpy.array(self.vibirs, "d")
# Parse excited state output (for CIS calculations).
# Jaguar calculates only singlet states.
if line[2:15] == "Excited State":
if not hasattr(self, "etenergies"):
self.etenergies = []
if not hasattr(self, "etoscs"):
self.etoscs = []
if not hasattr(self, "etsecs"):
self.etsecs = []
self.etsyms = []
etenergy = float(line.split()[3])
etenergy = utils.convertor(etenergy, "eV", "cm-1")
self.etenergies.append(etenergy)
# Read past the next four lines; the fifth read gives the first excitation line.
for i in range(5):
line = next(inputfile)
self.etsecs.append([])
# Jaguar calculates only singlet states.
self.etsyms.append('Singlet-A')
while line.strip() != "":
fromMO = int(line.split()[0])-1
toMO = int(line.split()[2])-1
coeff = float(line.split()[-1])
self.etsecs[-1].append([(fromMO, 0), (toMO, 0), coeff])
line = next(inputfile)
# Read past the next three lines; the fourth read gives the oscillator strength line.
for i in range(4):
line = next(inputfile)
strength = float(line.split()[-1])
self.etoscs.append(strength)
if __name__ == "__main__":
import doctest, jaguarparser
doctest.testmod(jaguarparser, verbose=False)
| Clyde-fare/cclib_bak | src/cclib/parser/jaguarparser.py | Python | lgpl-2.1 | 20,075 | ["Jaguar", "cclib"] | a76fd43610c5ccae9001613eea2b78d4833a351d02e7ee716179dad37c200f34 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
slogdet = gen_linalg_ops.log_matrix_determinant
tf_export('linalg.slogdet')(dispatch.add_dispatch_support(slogdet))
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
logm = gen_linalg_ops.matrix_logarithm
lu = gen_linalg_ops.lu
tf_export('linalg.logm')(dispatch.add_dispatch_support(logm))
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
@dispatch.add_dispatch_support
def logdet(matrix, name=None):
"""Computes log of the determinant of a hermitian positive definite matrix.
```python
# Compute the determinant of a matrix while reducing the chance of over- or
# underflow:
A = ... # shape 10 x 10
det = tf.exp(tf.linalg.logdet(A)) # scalar
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op`. Defaults to `logdet`.
Returns:
The natural log of the determinant of `matrix`.
@compatibility(numpy)
Equivalent to numpy.linalg.slogdet, although no sign is returned since only
hermitian positive definite matrices are supported.
@end_compatibility
"""
# This uses the property that the log det(A) = 2*sum(log(real(diag(C))))
# where C is the cholesky decomposition of A.
with ops.name_scope(name, 'logdet', [matrix]):
chol = gen_linalg_ops.cholesky(matrix)
return 2.0 * math_ops.reduce_sum(
math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
axis=[-1])
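# A minimal sketch (with hypothetical values) of the identity used above: for a
# hermitian positive definite A with Cholesky factor C (A = C C^T),
# det(A) = prod(diag(C))**2, so log det(A) = 2 * sum(log(diag(C))).
#
# import numpy as np
# a = np.array([[4.0, 2.0], [2.0, 3.0]])
# c = np.linalg.cholesky(a)
# assert np.isclose(2.0 * np.sum(np.log(np.diag(c))), np.log(np.linalg.det(a)))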
@tf_export('linalg.adjoint')
@dispatch.add_dispatch_support
def adjoint(matrix, name=None):
"""Transposes the last two dimensions of and conjugates tensor `matrix`.
For example:
```python
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
matrix.
"""
with ops.name_scope(name, 'adjoint', [matrix]):
matrix = ops.convert_to_tensor(matrix, name='matrix')
return array_ops.matrix_transpose(matrix, conjugate=True)
# This section is ported nearly verbatim from Eigen's implementation:
# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
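# Each _matrix_exp_padeN helper below returns the odd part U = A * p(A^2) and
# the even part V = q(A^2) of the [N/N] Pade approximant of exp(A); the
# approximant itself is then recovered by solving (V - U) X = (V + U).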
def _matrix_exp_pade3(matrix):
"""3rd-order Pade approximant for matrix exponential."""
b = [120.0, 60.0, 12.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
tmp = matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade5(matrix):
"""5th-order Pade approximant for matrix exponential."""
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade7(matrix):
"""7th-order Pade approximant for matrix exponential."""
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade9(matrix):
"""9th-order Pade approximant for matrix exponential."""
b = [
17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0,
2162160.0, 110880.0, 3960.0, 90.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
matrix_8 = math_ops.matmul(matrix_6, matrix_2)
tmp = (
matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 +
b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = (
b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 +
b[0] * ident)
return matrix_u, matrix_v
def _matrix_exp_pade13(matrix):
"""13th-order Pade approximant for matrix exponential."""
b = [
64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0,
33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp_u = (
math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) +
b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp_u)
tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2
matrix_v = (
math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 +
b[2] * matrix_2 + b[0] * ident)
return matrix_u, matrix_v
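# Connecting sketch (added commentary, not original code): each
# `_matrix_exp_padeN` helper returns the pair (U, V) of the degree-N Pade
# approximant, and the approximation itself is the rational form
# exp(A) ~= (V - U)^{-1} (V + U), which `matrix_exponential` below assembles
# as `matrix_solve(-u + v, u + v)`. A hedged standalone illustration:
def _pade3_example():
  a = constant_op.constant([[0.0, 0.01], [-0.01, 0.0]])  # Small-norm matrix.
  u, v = _matrix_exp_pade3(a)
  return linalg_ops.matrix_solve(-u + v, u + v)  # Close to expm(a).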
@tf_export('linalg.expm')
@dispatch.add_dispatch_support
def matrix_exponential(input, name=None): # pylint: disable=redefined-builtin
r"""Computes the matrix exponential of one or more square matrices.
$$exp(A) = \sum_{n=0}^\infty A^n/n!$$
The exponential is computed using a combination of the scaling and squaring
method and the Pade approximation. Details can be found in:
Nicholas J. Higham, "The scaling and squaring method for the matrix
exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. The output is a tensor of the same shape as the input
containing the exponential for all input submatrices `[..., :, :]`.
Args:
input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or
`complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
the matrix exponential of the input.
Raises:
ValueError: An unsupported type is provided as input.
@compatibility(scipy)
Equivalent to scipy.linalg.expm
@end_compatibility
"""
with ops.name_scope(name, 'matrix_exponential', [input]):
matrix = ops.convert_to_tensor(input, name='input')
if matrix.shape[-2:] == [0, 0]:
return matrix
batch_shape = matrix.shape[:-2]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(matrix)[:-2]
# reshaping the batch makes the where statements work better
matrix = array_ops.reshape(
matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
l1_norm = math_ops.reduce_max(
math_ops.reduce_sum(
math_ops.abs(matrix),
axis=array_ops.size(array_ops.shape(matrix)) - 2),
axis=-1)[..., array_ops.newaxis, array_ops.newaxis]
const = lambda x: constant_op.constant(x, l1_norm.dtype)
def _nest_where(vals, cases):
assert len(vals) == len(cases) - 1
if len(vals) == 1:
return array_ops.where_v2(
math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
else:
return array_ops.where_v2(
math_ops.less(l1_norm, const(vals[0])), cases[0],
_nest_where(vals[1:], cases[1:]))
if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
maxnorm = const(3.925724783138660)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(
matrix /
math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
conds = (4.258730016922831e-001, 1.880152677804762e+000)
u = _nest_where(conds, (u3, u5, u7))
v = _nest_where(conds, (v3, v5, v7))
elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
maxnorm = const(5.371920351148152)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(matrix)
u9, v9 = _matrix_exp_pade9(matrix)
u13, v13 = _matrix_exp_pade13(
matrix /
math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
conds = (1.495585217958292e-002, 2.539398330063230e-001,
9.504178996162932e-001, 2.097847961257068e+000)
u = _nest_where(conds, (u3, u5, u7, u9, u13))
v = _nest_where(conds, (v3, v5, v7, v9, v13))
else:
raise ValueError('tf.linalg.expm does not support matrices of type %s' %
matrix.dtype)
is_finite = math_ops.is_finite(math_ops.reduce_max(l1_norm))
nan = constant_op.constant(np.nan, matrix.dtype)
result = control_flow_ops.cond(
is_finite, lambda: linalg_ops.matrix_solve(-u + v, u + v),
lambda: array_ops.fill(array_ops.shape(matrix), nan))
max_squarings = math_ops.reduce_max(squarings)
i = const(0.0)
def c(i, _):
return control_flow_ops.cond(is_finite,
lambda: math_ops.less(i, max_squarings),
lambda: constant_op.constant(False))
def b(i, r):
return i + 1, array_ops.where_v2(
math_ops.less(i, squarings), math_ops.matmul(r, r), r)
_, result = control_flow_ops.while_loop(c, b, [i, result])
if not matrix.shape.is_fully_defined():
return array_ops.reshape(
result,
array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
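# Usage sketch (an illustrative addition): for a diagonal matrix the matrix
# exponential is the elementwise exponential of the diagonal, which gives an
# easy correctness check. The helper name is hypothetical.
def _expm_example():
  d = constant_op.constant([0.0, 1.0, 2.0])
  return matrix_exponential(diag(d)), diag(math_ops.exp(d))  # Approx. equal.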
@tf_export('linalg.banded_triangular_solve', v1=[])
def banded_triangular_solve(
bands,
rhs,
lower=True,
adjoint=False, # pylint: disable=redefined-outer-name
name=None):
r"""Solve triangular systems of equations with a banded solver.
`bands` is a tensor of shape `[..., K, M]`, where `K` represents the number
of bands stored. This corresponds to a batch of `M` by `M` matrices, whose
`K` subdiagonals (when `lower` is `True`) are stored.
This operator broadcasts the batch dimensions of `bands` and the batch
dimensions of `rhs`.
Examples:
Storing 2 bands of a 3x3 matrix.
Note that the first element in the second row is ignored due to
the 'LEFT_RIGHT' padding.
>>> x = [[2., 3., 4.], [1., 2., 3.]]
>>> x2 = [[2., 3., 4.], [10000., 2., 3.]]
>>> y = tf.zeros([3, 3])
>>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(-1, 0))
>>> z
<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[2., 0., 0.],
[2., 3., 0.],
[0., 3., 4.]], dtype=float32)>
>>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([3, 1]))
>>> soln
<tf.Tensor: shape=(3, 1), dtype=float32, numpy=
array([[0.5 ],
[0. ],
[0.25]], dtype=float32)>
>>> are_equal = soln == tf.linalg.banded_triangular_solve(x2, tf.ones([3, 1]))
>>> tf.reduce_all(are_equal).numpy()
True
>>> are_equal = soln == tf.linalg.triangular_solve(z, tf.ones([3, 1]))
>>> tf.reduce_all(are_equal).numpy()
True
Storing 2 superdiagonals of a 4x4 matrix. Because of the 'LEFT_RIGHT' padding
the last element of the first row is ignored.
>>> x = [[2., 3., 4., 5.], [-1., -2., -3., -4.]]
>>> y = tf.zeros([4, 4])
>>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(0, 1))
>>> z
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[-1., 2., 0., 0.],
[ 0., -2., 3., 0.],
[ 0., 0., -3., 4.],
[ 0., 0., -0., -4.]], dtype=float32)>
>>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([4, 1]), lower=False)
>>> soln
<tf.Tensor: shape=(4, 1), dtype=float32, numpy=
array([[-4. ],
[-1.5 ],
[-0.6666667],
[-0.25 ]], dtype=float32)>
>>> are_equal = (soln == tf.linalg.triangular_solve(
... z, tf.ones([4, 1]), lower=False))
>>> tf.reduce_all(are_equal).numpy()
True
Args:
bands: A `Tensor` describing the bands of the left hand side, with shape
`[..., K, M]`. The `K` rows correspond to the diagonal to the `K - 1`-th
diagonal (the diagonal is the top row) when `lower` is `True` and
otherwise the `K - 1`-th superdiagonal to the diagonal (the diagonal is
the bottom row) when `lower` is `False`. The bands are stored with
'LEFT_RIGHT' alignment, where the superdiagonals are padded on the right
and subdiagonals are padded on the left. This is the alignment cuSPARSE
uses. See `tf.linalg.set_diag` for more details.
rhs: A `Tensor` of shape [..., M] or [..., M, N] and with the same dtype as
`diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
statically, `rhs` will be treated as a matrix rather than a vector.
lower: An optional `bool`. Defaults to `True`. Boolean indicating whether
`bands` represents a lower or upper triangular matrix.
adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether
to solve with the matrix's block-wise adjoint.
name: A name to give this `Op` (optional).
Returns:
A `Tensor` of shape [..., M] or [..., M, N] containing the solutions.
"""
with ops.name_scope(name, 'banded_triangular_solve', [bands, rhs]):
return gen_linalg_ops.banded_triangular_solve(
bands, rhs, lower=lower, adjoint=adjoint)
@tf_export('linalg.tridiagonal_solve')
@dispatch.add_dispatch_support
def tridiagonal_solve(diagonals,
rhs,
diagonals_format='compact',
transpose_rhs=False,
conjugate_rhs=False,
name=None,
partial_pivoting=True,
perturb_singular=False):
r"""Solves tridiagonal systems of equations.
The input can be supplied in various formats: `matrix`, `sequence` and
`compact`, specified by the `diagonals_format` arg.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
In `sequence` format, `diagonals` are supplied as a tuple or list of three
tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
`M-1` or `M`; in the latter case, the last element of superdiagonal and the
first element of subdiagonal will be ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `compact` format is recommended as the one with the best performance. In
case you need to cast a tensor into the compact format manually, use
`tf.gather_nd`.
An example for a tensor of shape [m, m]:
```python
rhs = tf.constant([...])
matrix = tf.constant([[...]])
m = matrix.shape[0]
dummy_idx = [0, 0] # An arbitrary element to use as a dummy
indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal
[[i, i] for i in range(m)], # Diagonal
[dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal
diagonals = tf.gather_nd(matrix, indices)
x = tf.linalg.tridiagonal_solve(diagonals, rhs)
```
Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
`[..., M, K]`. The latter allows solving K systems simultaneously, all with
the same left-hand side but K different right-hand sides. If `transpose_rhs`
is set to `True`, the expected shape is `[..., M]` or `[..., K, M]`.
The batch dimensions, denoted as `...`, must be the same in `diagonals` and
`rhs`.
The output is a tensor of the same shape as `rhs`: either `[..., M]` or
`[..., M, K]`.
The op isn't guaranteed to raise an error if the input matrix is not
invertible. `tf.debugging.check_numerics` can be applied to the output to
detect invertibility problems.
**Note**: with large batch sizes, the computation on the GPU may be slow, if
either `partial_pivoting=True` or there are multiple right-hand sides
(`K > 1`). If this issue arises, consider if it's possible to disable pivoting
and have `K = 1`, or, alternatively, consider using CPU.
On CPU, solution is computed via Gaussian elimination with or without partial
pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE
library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
shape depends of `diagonals_format`, see description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
`diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
statically, `rhs` will be treated as a matrix rather than a vector.
diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
`compact`.
transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
if the shape of rhs is [..., M]).
conjugate_rhs: If `True`, `rhs` is conjugated before solving.
name: A name to give this `Op` (optional).
partial_pivoting: whether to perform partial pivoting. `True` by default.
Partial pivoting makes the procedure more stable, but slower. Partial
pivoting is unnecessary in some cases, including diagonally dominant and
symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).
perturb_singular: whether to perturb singular matrices to return a finite
result. `False` by default. If true, solutions to systems involving
a singular matrix will be computed by perturbing near-zero pivots in
the partially pivoted LU decomposition. Specifically, tiny pivots are
perturbed by an amount of order `eps * max_{ij} |U(i,j)|` to avoid
overflow. Here `U` is the upper triangular part of the LU decomposition,
and `eps` is the machine precision. This is useful for solving
numerically singular systems when computing eigenvectors by inverse
iteration.
If `partial_pivoting` is `False`, `perturb_singular` must be `False` as
well.
Returns:
A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.
If the input matrix is singular, the result is undefined.
Raises:
ValueError: Is raised if any of the following conditions hold:
1. An unsupported type is provided as input,
2. the input tensors have incorrect shapes,
3. `perturb_singular` is `True` but `partial_pivoting` is not.
UnimplementedError: Whenever `partial_pivoting` is true and the backend is
XLA, or whenever `perturb_singular` is true and the backend is
XLA or GPU.
[1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:
Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7.
"""
if perturb_singular and not partial_pivoting:
raise ValueError('partial_pivoting must be True if perturb_singular is.')
if diagonals_format == 'compact':
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
perturb_singular, name)
if diagonals_format == 'sequence':
if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
raise ValueError('Expected diagonals to be a sequence of length 3.')
superdiag, maindiag, subdiag = diagonals
if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
raise ValueError(
'Tensors representing the three diagonals must have the same shape,'
'except for the last dimension, got {}, {}, {}'.format(
subdiag.shape, maindiag.shape, superdiag.shape))
m = tensor_shape.dimension_value(maindiag.shape[-1])
def pad_if_necessary(t, name, last_dim_padding):
n = tensor_shape.dimension_value(t.shape[-1])
if not n or n == m:
return t
if n == m - 1:
paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +
[last_dim_padding])
return array_ops.pad(t, paddings)
raise ValueError('Expected {} to have length {} or {}, got {}.'.format(
name, m, m - 1, n))
subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])
diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
perturb_singular, name)
if diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if m1 and m2 and m1 != m2:
raise ValueError(
'Expected last two dimensions of diagonals to be the same, got {} and {}'
.format(m1, m2))
m = m1 or m2
diagonals = array_ops.matrix_diag_part(
diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
perturb_singular, name)
raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
perturb_singular, name):
"""Helper function used after the input has been cast to compact form."""
diags_rank, rhs_rank = diagonals.shape.rank, rhs.shape.rank
# If we know the rank of the diagonal tensor, do some static checking.
if diags_rank:
if diags_rank < 2:
raise ValueError(
'Expected diagonals to have rank at least 2, got {}'.format(
diags_rank))
if rhs_rank and rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
diags_rank - 1, diags_rank, rhs_rank))
if (rhs_rank and not diagonals.shape[:-2].is_compatible_with(
rhs.shape[:diags_rank - 2])):
raise ValueError('Batch shapes {} and {} are incompatible'.format(
diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))
if diagonals.shape[-2] and diagonals.shape[-2] != 3:
raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))
def check_num_lhs_matches_num_rhs():
if (diagonals.shape[-1] and rhs.shape[-2] and
diagonals.shape[-1] != rhs.shape[-2]):
raise ValueError('Expected number of left-hand sides and right-hand '
'sides to be equal, got {} and {}'.format(
diagonals.shape[-1], rhs.shape[-2]))
if rhs_rank and diags_rank and rhs_rank == diags_rank - 1:
# Rhs provided as a vector, ignoring transpose_rhs
if conjugate_rhs:
rhs = math_ops.conj(rhs)
rhs = array_ops.expand_dims(rhs, -1)
check_num_lhs_matches_num_rhs()
return array_ops.squeeze(
linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
perturb_singular, name), -1)
if transpose_rhs:
rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
elif conjugate_rhs:
rhs = math_ops.conj(rhs)
check_num_lhs_matches_num_rhs()
return linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
perturb_singular, name)
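# Usage sketch (an illustrative addition): solving a 3x3 system in the
# default `compact` format. The corner elements marked below are ignored by
# the op, so they are set to zero here purely for readability.
def _tridiagonal_solve_example():
  diagonals = constant_op.constant(
      [[1.0, 1.0, 0.0],   # Superdiagonal; last element ignored.
       [2.0, 2.0, 2.0],   # Main diagonal.
       [0.0, 1.0, 1.0]])  # Subdiagonal; first element ignored.
  rhs = constant_op.constant([[3.0], [5.0], [4.0]])
  return tridiagonal_solve(diagonals, rhs)  # Shape [3, 1].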
@tf_export('linalg.tridiagonal_matmul')
@dispatch.add_dispatch_support
def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):
r"""Multiplies tridiagonal matrix by matrix.
`diagonals` is a representation of a tridiagonal NxN matrix, whose layout
depends on `diagonals_format`.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
In `sequence` format, `diagonals` is a list or tuple of three tensors:
`[superdiag, maindiag, subdiag]`, each having shape [..., M]. The last element
of `superdiag` and the first element of `subdiag` are ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `sequence` format is recommended as the one with the best performance.
`rhs` is the matrix to the right of the multiplication. It has shape `[..., M, N]`.
Example:
```python
superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)
maindiag = tf.constant([2, 2, 2], dtype=tf.float64)
subdiag = tf.constant([0, -1, -1], dtype=tf.float64)
diagonals = [superdiag, maindiag, subdiag]
rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')
```
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
shape depends of `diagonals_format`, see description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.
diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is `compact`.
name: A name to give this `Op` (optional).
Returns:
A `Tensor` of shape [..., M, N] containing the result of multiplication.
Raises:
ValueError: An unsupported type is provided as input, or when the input
tensors have incorrect shapes.
"""
if diagonals_format == 'compact':
superdiag = diagonals[..., 0, :]
maindiag = diagonals[..., 1, :]
subdiag = diagonals[..., 2, :]
elif diagonals_format == 'sequence':
superdiag, maindiag, subdiag = diagonals
elif diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if m1 and m2 and m1 != m2:
raise ValueError(
'Expected last two dimensions of diagonals to be the same, got {} and {}'
.format(m1, m2))
diags = array_ops.matrix_diag_part(
diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
superdiag = diags[..., 0, :]
maindiag = diags[..., 1, :]
subdiag = diags[..., 2, :]
else:
raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)
# C++ backend requires matrices.
# Converting 1-dimensional vectors to matrices with 1 row.
superdiag = array_ops.expand_dims(superdiag, -2)
maindiag = array_ops.expand_dims(maindiag, -2)
subdiag = array_ops.expand_dims(subdiag, -2)
return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)
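# Verification sketch (an addition): in `sequence` format the result should
# match a dense matmul against the reconstructed tridiagonal matrix.
def _tridiagonal_matmul_example():
  superdiag = constant_op.constant([-1.0, -1.0, 0.0])  # Last element ignored.
  maindiag = constant_op.constant([2.0, 2.0, 2.0])
  subdiag = constant_op.constant([0.0, -1.0, -1.0])  # First element ignored.
  rhs = array_ops.ones([3, 2])
  dense = constant_op.constant(
      [[2.0, -1.0, 0.0], [-1.0, 2.0, -1.0], [0.0, -1.0, 2.0]])
  fast = tridiagonal_matmul(
      [superdiag, maindiag, subdiag], rhs, diagonals_format='sequence')
  return fast, math_ops.matmul(dense, rhs)  # Equal.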
def _maybe_validate_matrix(a, validate_args):
"""Checks that input is a `float` matrix."""
assertions = []
if not a.dtype.is_floating:
raise TypeError('Input `a` must have `float`-like `dtype` '
'(saw {}).'.format(a.dtype.name))
if a.shape is not None and a.shape.rank is not None:
if a.shape.rank < 2:
raise ValueError('Input `a` must have at least 2 dimensions '
'(saw: {}).'.format(a.shape.rank))
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least(
a, rank=2, message='Input `a` must have at least 2 dimensions.'))
return assertions
@tf_export('linalg.matrix_rank')
@dispatch.add_dispatch_support
def matrix_rank(a, tol=None, validate_args=False, name=None):
"""Compute the matrix rank of one or more matrices.
Args:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
pseudo-inverted.
tol: Threshold below which the singular value is counted as 'zero'.
Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: 'matrix_rank'.
Returns:
matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
singular values.
"""
with ops.name_scope(name or 'matrix_rank'):
a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with ops.control_dependencies(assertions):
a = array_ops.identity(a)
s = svd(a, compute_uv=False)
if tol is None:
if (a.shape[-2:]).is_fully_defined():
m = np.max(a.shape[-2:].as_list())
else:
m = math_ops.reduce_max(array_ops.shape(a)[-2:])
eps = np.finfo(a.dtype.as_numpy_dtype).eps
tol = (
eps * math_ops.cast(m, a.dtype) *
math_ops.reduce_max(s, axis=-1, keepdims=True))
return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1)
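# Usage sketch (an illustrative addition): a 3x3 matrix whose third row is
# the sum of the first two has rank 2.
def _matrix_rank_example():
  a = constant_op.constant([[1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0],
                            [1.0, 1.0, 0.0]])
  return matrix_rank(a)  # ==> 2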
@tf_export('linalg.pinv')
@dispatch.add_dispatch_support
def pinv(a, rcond=None, validate_args=False, name=None):
"""Compute the Moore-Penrose pseudo-inverse of one or more matrices.
Calculate the [generalized inverse of a matrix](
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
singular-value decomposition (SVD) and including all large singular values.
The pseudo-inverse of a matrix `A` is defined as 'the matrix that "solves"
[the least-squares problem] `A @ x = b`', i.e., if `x_hat` is a solution, then
`A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if
`U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then
`A_pinv = V @ inv(Sigma) @ U.T`. [(Strang, 1980)][1]
This function is analogous to [`numpy.linalg.pinv`](
https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
default `rcond` is `1e-15`. Here the default is
`10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.
Args:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
pseudo-inverted.
rcond: `Tensor` of small singular value cutoffs. Singular values smaller
(in modulus) than `rcond` * largest_singular_value (again, in modulus) are
set to zero. Must broadcast against `tf.shape(a)[:-2]`.
Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: 'pinv'.
Returns:
a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except
rightmost two dimensions are transposed.
Raises:
TypeError: if input `a` does not have `float`-like `dtype`.
ValueError: if input `a` has fewer than 2 dimensions.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
a = tf.constant([[1., 0.4, 0.5],
[0.4, 0.2, 0.25],
[0.5, 0.25, 0.35]])
tf.matmul(tf.linalg.pinv(a), a)
# ==> array([[1., 0., 0.],
#            [0., 1., 0.],
#            [0., 0., 1.]], dtype=float32)
a = tf.constant([[1., 0.4, 0.5, 1.],
[0.4, 0.2, 0.25, 2.],
[0.5, 0.25, 0.35, 3.]])
tf.matmul(tf.linalg.pinv(a), a)
# ==> array([[ 0.76,  0.37,  0.21, -0.02],
#            [ 0.37,  0.43, -0.33,  0.02],
#            [ 0.21, -0.33,  0.81,  0.01],
#            [-0.02,  0.02,  0.01,  1.  ]], dtype=float32)
```
#### References
[1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press,
Inc., 1980, pp. 139-142.
"""
with ops.name_scope(name or 'pinv'):
a = ops.convert_to_tensor(a, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with ops.control_dependencies(assertions):
a = array_ops.identity(a)
dtype = a.dtype.as_numpy_dtype
if rcond is None:
def get_dim_size(dim):
dim_val = tensor_shape.dimension_value(a.shape[dim])
if dim_val is not None:
return dim_val
return array_ops.shape(a)[dim]
num_rows = get_dim_size(-2)
num_cols = get_dim_size(-1)
if isinstance(num_rows, int) and isinstance(num_cols, int):
max_rows_cols = float(max(num_rows, num_cols))
else:
max_rows_cols = math_ops.cast(
math_ops.maximum(num_rows, num_cols), dtype)
rcond = 10. * max_rows_cols * np.finfo(dtype).eps
rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')
# Calculate pseudo inverse via SVD.
# Note: if a is Hermitian then u == v. (We might observe additional
# performance by explicitly setting `v = u` in such cases.)
[
singular_values, # Sigma
left_singular_vectors, # U
right_singular_vectors, # V
] = svd(
a, full_matrices=False, compute_uv=True)
# Saturate small singular values to inf. This has the effect of making
# `1. / s = 0.` while not resulting in `NaN` gradients.
cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1)
singular_values = array_ops.where_v2(
singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values,
np.array(np.inf, dtype))
# By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse
# is defined as `pinv(a) == v @ inv(s) @ u^H`.
a_pinv = math_ops.matmul(
right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2),
left_singular_vectors,
adjoint_b=True)
if a.shape is not None and a.shape.rank is not None:
a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))
return a_pinv
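# Verification sketch (an addition, assuming eager execution): the defining
# Moore-Penrose property `a @ pinv(a) @ a == a` holds up to floating-point
# tolerance, including for non-square inputs.
def _pinv_example():
  a = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # 3x2.
  return math_ops.matmul(a, math_ops.matmul(pinv(a), a))  # Approximately a.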
@tf_export('linalg.lu_solve')
@dispatch.add_dispatch_support
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):
"""Solves systems of linear eqns `A X = RHS`, given LU factorizations.
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
`A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,
tf.newaxis])[..., 0]`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_solve').
Returns:
x: The `X` in `A @ X = RHS`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[1., 2],
[3, 4]],
[[7, 8],
[3, 4]]]
inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
tf.debugging.assert_near(tf.linalg.inv(x), inv_x)
# ==> True
```
"""
with ops.name_scope(name or 'lu_solve'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')
assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
rhs = array_ops.identity(rhs)
if (rhs.shape.rank == 2 and perm.shape.rank == 1):
# Both rhs and perm have scalar batch_shape.
permuted_rhs = array_ops.gather(rhs, perm, axis=-2)
else:
# Either rhs or perm have non-scalar batch_shape or we can't determine
# this information statically.
rhs_shape = array_ops.shape(rhs)
broadcast_batch_shape = array_ops.broadcast_dynamic_shape(
rhs_shape[:-2],
array_ops.shape(perm)[:-1])
d, m = rhs_shape[-2], rhs_shape[-1]
rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]],
axis=0)
# Tile out rhs.
broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)
broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])
# Tile out perm and add batch indices.
broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])
broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])
broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)
broadcast_batch_indices = array_ops.broadcast_to(
math_ops.range(broadcast_batch_size)[:, array_ops.newaxis],
[broadcast_batch_size, d])
broadcast_perm = array_ops.stack(
[broadcast_batch_indices, broadcast_perm], axis=-1)
permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)
permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)
lower = set_diag(
band_part(lower_upper, num_lower=-1, num_upper=0),
array_ops.ones(
array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))
return triangular_solve(
lower_upper, # Only upper is accessed.
triangular_solve(lower, permuted_rhs),
lower=False)
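# Broadcasting sketch (an addition): the body above tiles `rhs` and `perm`
# when their batch shapes differ, so a single unbatched right-hand side can
# be solved against a whole batch of factorizations.
def _lu_solve_broadcast_example():
  x = constant_op.constant([[[1.0, 2.0], [3.0, 4.0]],
                            [[7.0, 8.0], [3.0, 4.0]]])  # Batch of 2 matrices.
  lower_upper, perm = lu(x)
  rhs = eye(2)  # Unbatched; broadcast against the batch of factorizations.
  return lu_solve(lower_upper, perm, rhs)  # Batch of inverses, shape [2,2,2].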
@tf_export('linalg.lu_matrix_inverse')
@dispatch.add_dispatch_support
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
"""Computes the inverse given the LU decomposition(s) of one or more matrices.
This op is conceptually identical to,
```python
inv_X = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(X))
tf.debugging.assert_near(tf.linalg.inv(X), inv_X)
# ==> True
```
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_matrix_inverse').
Returns:
inv_x: The matrix_inv, i.e.,
`tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[3., 4], [1, 2]],
[[7., 8], [3, 4]]]
inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))
tf.debugging.assert_near(tf.linalg.inv(x), inv_x)
# ==> True
```
"""
with ops.name_scope(name or 'lu_matrix_inverse'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
shape = array_ops.shape(lower_upper)
return lu_solve(
lower_upper,
perm,
rhs=eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype),
validate_args=False)
@tf_export('linalg.lu_reconstruct')
@dispatch.add_dispatch_support
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):
"""The reconstruct one or more matrices from their LU decomposition(s).
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_reconstruct').
Returns:
x: The original input to `tf.linalg.lu`, i.e., `x` as in,
`lu_reconstruct(*tf.linalg.lu(x))`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[3., 4], [1, 2]],
[[7., 8], [3, 4]]]
x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))
tf.debugging.assert_near(x, x_reconstructed)
# ==> True
```
"""
with ops.name_scope(name or 'lu_reconstruct'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
shape = array_ops.shape(lower_upper)
lower = set_diag(
band_part(lower_upper, num_lower=-1, num_upper=0),
array_ops.ones(shape[:-1], dtype=lower_upper.dtype))
upper = band_part(lower_upper, num_lower=0, num_upper=-1)
x = math_ops.matmul(lower, upper)
if (lower_upper.shape is None or lower_upper.shape.rank is None or
lower_upper.shape.rank != 2):
# We either don't know the batch rank or there are >0 batch dims.
batch_size = math_ops.reduce_prod(shape[:-2])
d = shape[-1]
x = array_ops.reshape(x, [batch_size, d, d])
perm = array_ops.reshape(perm, [batch_size, d])
perm = map_fn.map_fn(array_ops.invert_permutation, perm)
batch_indices = array_ops.broadcast_to(
math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])
x = array_ops.gather_nd(x, array_ops.stack([batch_indices, perm],
axis=-1))
x = array_ops.reshape(x, shape)
else:
x = array_ops.gather(x, array_ops.invert_permutation(perm))
x.set_shape(lower_upper.shape)
return x
def lu_reconstruct_assertions(lower_upper, perm, validate_args):
"""Returns list of assertions related to `lu_reconstruct` assumptions."""
assertions = []
message = 'Input `lower_upper` must have at least 2 dimensions.'
if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))
message = '`rank(lower_upper)` must equal `rank(perm) + 1`'
if lower_upper.shape.rank is not None and perm.shape.rank is not None:
if lower_upper.shape.rank != perm.shape.rank + 1:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank(
lower_upper, rank=array_ops.rank(perm) + 1, message=message))
message = '`lower_upper` must be square.'
if lower_upper.shape[:-2].is_fully_defined():
if lower_upper.shape[-2] != lower_upper.shape[-1]:
raise ValueError(message)
elif validate_args:
m, n = array_ops.split(
array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)
assertions.append(check_ops.assert_equal(m, n, message=message))
return assertions
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
"""Returns list of assertions related to `lu_solve` assumptions."""
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
message = 'Input `rhs` must have at least 2 dimensions.'
if rhs.shape.ndims is not None:
if rhs.shape.ndims < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least(rhs, rank=2, message=message))
message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'
if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None):
if lower_upper.shape[-1] != rhs.shape[-2]:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_equal(
array_ops.shape(lower_upper)[-1],
array_ops.shape(rhs)[-2],
message=message))
return assertions
@tf_export('linalg.eigh_tridiagonal')
@dispatch.add_dispatch_support
def eigh_tridiagonal(alpha,
beta,
eigvals_only=True,
select='a',
select_range=None,
tol=None,
name=None):
"""Computes the eigenvalues of a Hermitian tridiagonal matrix.
Args:
alpha: A real or complex tensor of shape (n), the diagonal elements of the
matrix. NOTE: If alpha is complex, the imaginary part is ignored (assumed
zero) to satisfy the requirement that the matrix be Hermitian.
beta: A real or complex tensor of shape (n-1), containing the elements of
the first super-diagonal of the matrix. If beta is complex, the first
sub-diagonal of the matrix is assumed to be the conjugate of beta to
satisfy the requirement that the matrix be Hermitian.
eigvals_only: If False, both eigenvalues and corresponding eigenvectors are
computed. If True, only eigenvalues are computed. Default is True.
select: Optional string with values in {'a', 'v', 'i'} (default is 'a') that
determines which eigenvalues to calculate:
'a': all eigenvalues.
'v': eigenvalues in the interval (min, max] given by `select_range`.
'i': eigenvalues with indices min <= i <= max.
select_range: Size 2 tuple or list or tensor specifying the range of
eigenvalues to compute together with select. If select is 'a',
select_range is ignored.
tol: Optional scalar. The absolute tolerance to which each eigenvalue is
required. An eigenvalue (or cluster) is considered to have converged if it
lies in an interval of this width. If tol is None (default), the value
eps*|T|_2 is used where eps is the machine precision, and |T|_2 is the
2-norm of the matrix T.
name: Optional name of the op.
Returns:
eig_vals: The eigenvalues of the matrix in non-decreasing order.
eig_vectors: If `eigvals_only` is False the eigenvectors are returned in
the second output argument.
Raises:
ValueError: If input values are invalid.
NotImplementedError: Computing eigenvectors for `eigvals_only` = False is
not implemented yet.
This op implements a subset of the functionality of
scipy.linalg.eigh_tridiagonal.
Note: The result is undefined if the input contains +/-inf or NaN, or if
any value in beta has a magnitude greater than
`numpy.sqrt(numpy.finfo(beta.dtype.as_numpy_dtype).max)`.
TODO(b/187527398):
Add support for outer batch dimensions.
#### Examples
```python
import numpy
eigvals = tf.linalg.eigh_tridiagonal([0.0, 0.0, 0.0], [1.0, 1.0])
eigvals_expected = [-numpy.sqrt(2.0), 0.0, numpy.sqrt(2.0)]
tf.debugging.assert_near(eigvals_expected, eigvals)
# ==> True
```
"""
with ops.name_scope(name or 'eigh_tridiagonal'):
def _compute_eigenvalues(alpha, beta):
"""Computes all eigenvalues of a Hermitian tridiagonal matrix."""
def _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, x):
"""Implements the Sturm sequence recurrence."""
with ops.name_scope('sturm'):
n = alpha.shape[0]
zeros = array_ops.zeros(array_ops.shape(x), dtype=dtypes.int32)
ones = array_ops.ones(array_ops.shape(x), dtype=dtypes.int32)
# The first step in the Sturm sequence recurrence
# requires special care if x is equal to alpha[0].
def sturm_step0():
q = alpha[0] - x
count = array_ops.where(q < 0, ones, zeros)
q = array_ops.where(
math_ops.equal(alpha[0], x), alpha0_perturbation, q)
return q, count
# Subsequent steps all take this form:
def sturm_step(i, q, count):
q = alpha[i] - beta_sq[i - 1] / q - x
count = array_ops.where(q <= pivmin, count + 1, count)
q = array_ops.where(q <= pivmin, math_ops.minimum(q, -pivmin), q)
return q, count
# The first step initializes q and count.
q, count = sturm_step0()
# Peel off ((n-1) % blocksize) steps from the main loop, so we can run
# the bulk of the iterations unrolled by a factor of blocksize.
blocksize = 16
i = 1
peel = (n - 1) % blocksize
unroll_cnt = peel
def unrolled_steps(start, q, count):
for j in range(unroll_cnt):
q, count = sturm_step(start + j, q, count)
return start + unroll_cnt, q, count
i, q, count = unrolled_steps(i, q, count)
# Run the remaining steps of the Sturm sequence using a partially
# unrolled while loop.
unroll_cnt = blocksize
cond = lambda i, q, count: math_ops.less(i, n)
_, _, count = control_flow_ops.while_loop(
cond, unrolled_steps, [i, q, count], back_prop=False)
return count
with ops.name_scope('compute_eigenvalues'):
if alpha.dtype.is_complex:
alpha = math_ops.real(alpha)
beta_sq = math_ops.real(math_ops.conj(beta) * beta)
beta_abs = math_ops.sqrt(beta_sq)
else:
beta_sq = math_ops.square(beta)
beta_abs = math_ops.abs(beta)
# Estimate the largest and smallest eigenvalues of T using the
# Gershgorin circle theorem.
finfo = np.finfo(alpha.dtype.as_numpy_dtype)
off_diag_abs_row_sum = array_ops.concat(
[beta_abs[:1], beta_abs[:-1] + beta_abs[1:], beta_abs[-1:]], axis=0)
lambda_est_max = math_ops.minimum(
finfo.max, math_ops.reduce_max(alpha + off_diag_abs_row_sum))
lambda_est_min = math_ops.maximum(
finfo.min, math_ops.reduce_min(alpha - off_diag_abs_row_sum))
# Upper bound on 2-norm of T.
t_norm = math_ops.maximum(
math_ops.abs(lambda_est_min), math_ops.abs(lambda_est_max))
# Compute the smallest allowed pivot in the Sturm sequence to avoid
# overflow.
one = np.ones([], dtype=alpha.dtype.as_numpy_dtype)
safemin = np.maximum(one / finfo.max, (one + finfo.eps) * finfo.tiny)
pivmin = safemin * math_ops.maximum(one, math_ops.reduce_max(beta_sq))
alpha0_perturbation = math_ops.square(finfo.eps * beta_abs[0])
abs_tol = finfo.eps * t_norm
if tol:
abs_tol = math_ops.maximum(tol, abs_tol)
# In the worst case, when the absolute tolerance is eps*lambda_est_max
# and lambda_est_max = -lambda_est_min, we have to take as many
# bisection steps as there are bits in the mantissa plus 1.
max_it = finfo.nmant + 1
# Determine the indices of the desired eigenvalues, based on select
# and select_range.
asserts = None
if select == 'a':
target_counts = math_ops.range(n)
elif select == 'i':
asserts = check_ops.assert_less_equal(
select_range[0],
select_range[1],
message='Got empty index range in select_range.')
target_counts = math_ops.range(select_range[0], select_range[1] + 1)
elif select == 'v':
asserts = check_ops.assert_less(
select_range[0],
select_range[1],
message='Got empty interval in select_range.')
else:
raise ValueError("'select must have a value in {'a', 'i', 'v'}.")
if asserts:
with ops.control_dependencies([asserts]):
alpha = array_ops.identity(alpha)
# Run binary search for all desired eigenvalues in parallel, starting
# from an interval slightly wider than the estimated
# [lambda_est_min, lambda_est_max].
fudge = 2.1  # We widen the starting Gershgorin interval a bit.
norm_slack = math_ops.cast(n, alpha.dtype) * fudge * finfo.eps * t_norm
if select in {'a', 'i'}:
lower = lambda_est_min - norm_slack - 2 * fudge * pivmin
upper = lambda_est_max + norm_slack + fudge * pivmin
else:
# Count the number of eigenvalues in the given range.
lower = select_range[0] - norm_slack - 2 * fudge * pivmin
upper = select_range[1] + norm_slack + fudge * pivmin
first = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, lower)
last = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, upper)
target_counts = math_ops.range(first, last)
# Pre-broadcast the scalars used in the Sturm sequence for improved
# performance.
upper = math_ops.minimum(upper, finfo.max)
lower = math_ops.maximum(lower, finfo.min)
target_shape = array_ops.shape(target_counts)
lower = array_ops.broadcast_to(lower, shape=target_shape)
upper = array_ops.broadcast_to(upper, shape=target_shape)
pivmin = array_ops.broadcast_to(pivmin, target_shape)
alpha0_perturbation = array_ops.broadcast_to(alpha0_perturbation,
target_shape)
# We compute the midpoint as 0.5*lower + 0.5*upper to avoid overflow in
# (lower + upper) or (upper - lower) when the matrix has eigenvalues
# with magnitude greater than finfo.max / 2.
def midpoint(lower, upper):
return (0.5 * lower) + (0.5 * upper)
def continue_binary_search(i, lower, upper):
return math_ops.logical_and(
math_ops.less(i, max_it),
math_ops.less(abs_tol, math_ops.reduce_max(upper - lower)))
def binary_search_step(i, lower, upper):
mid = midpoint(lower, upper)
counts = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, mid)
lower = array_ops.where(counts <= target_counts, mid, lower)
upper = array_ops.where(counts > target_counts, mid, upper)
return i + 1, lower, upper
# Start parallel binary searches.
_, lower, upper = control_flow_ops.while_loop(continue_binary_search,
binary_search_step,
[0, lower, upper])
return midpoint(lower, upper)
def _compute_eigenvectors(alpha, beta, eigvals):
"""Implements inverse iteration to compute eigenvectors."""
with ops.name_scope('compute_eigenvectors'):
k = array_ops.size(eigvals)
n = array_ops.size(alpha)
alpha = math_ops.cast(alpha, dtype=beta.dtype)
# Eigenvectors corresponding to cluster of close eigenvalues are
# not unique and need to be explicitly orthogonalized. Here we
# identify such clusters. Note: This function assumes that
# eigenvalues are sorted in non-decreasing order.
gap = eigvals[1:] - eigvals[:-1]
eps = np.finfo(eigvals.dtype.as_numpy_dtype).eps
t_norm = math_ops.maximum(
math_ops.abs(eigvals[0]), math_ops.abs(eigvals[-1]))
gaptol = np.sqrt(eps) * t_norm
# Find the beginning and end of runs of eigenvectors corresponding
# to eigenvalues closer than "gaptol", which will need to be
# orthogonalized against each other.
close = math_ops.less(gap, gaptol)
left_neighbor_close = array_ops.concat([[False], close], axis=0)
right_neighbor_close = array_ops.concat([close, [False]], axis=0)
ortho_interval_start = math_ops.logical_and(
math_ops.logical_not(left_neighbor_close), right_neighbor_close)
ortho_interval_start = array_ops.squeeze(
array_ops.where_v2(ortho_interval_start), axis=-1)
ortho_interval_end = math_ops.logical_and(
left_neighbor_close, math_ops.logical_not(right_neighbor_close))
ortho_interval_end = array_ops.squeeze(
array_ops.where_v2(ortho_interval_end), axis=-1) + 1
num_clusters = array_ops.size(ortho_interval_end)
# We perform inverse iteration for all eigenvectors in parallel,
# starting from a random set of vectors, until all have converged.
v0 = math_ops.cast(
stateless_random_ops.stateless_random_normal(
shape=(k, n), seed=[7, 42]),
dtype=beta.dtype)
nrm_v = norm(v0, axis=1)
v0 = v0 / nrm_v[:, array_ops.newaxis]
zero_nrm = constant_op.constant(0, shape=nrm_v.shape, dtype=nrm_v.dtype)
# Replicate alpha - eigvals(i) and beta across the k eigenvectors so we
# can solve the k systems
# [T - eigvals(i)*eye(n)] x_i = r_i
# simultaneously using the batching mechanism.
eigvals_cast = math_ops.cast(eigvals, dtype=beta.dtype)
alpha_shifted = (
alpha[array_ops.newaxis, :] - eigvals_cast[:, array_ops.newaxis])
beta = array_ops.tile(beta[array_ops.newaxis, :], [k, 1])
diags = [beta, alpha_shifted, math_ops.conj(beta)]
def orthogonalize_close_eigenvectors(eigenvectors):
# Eigenvectors corresponding to a cluster of close eigenvalues are not
# uniquely defined, but the subspace they span is. To avoid numerical
# instability, we explicitly mutually orthogonalize such eigenvectors
# after each step of inverse iteration. It is customary to use
# modified Gram-Schmidt for this, but this is not very efficient
# on some platforms, so here we defer to the QR decomposition in
# TensorFlow.
def orthogonalize_cluster(cluster_idx, eigenvectors):
start = ortho_interval_start[cluster_idx]
end = ortho_interval_end[cluster_idx]
update_indices = array_ops.expand_dims(
math_ops.range(start, end), -1)
vectors_in_cluster = eigenvectors[start:end, :]
# We use the builtin QR factorization to orthonormalize the
# vectors in the cluster.
q, _ = qr(transpose(vectors_in_cluster))
vectors_to_update = transpose(q)
eigenvectors = array_ops.tensor_scatter_nd_update(
eigenvectors, update_indices, vectors_to_update)
return cluster_idx + 1, eigenvectors
_, eigenvectors = control_flow_ops.while_loop(
lambda i, ev: math_ops.less(i, num_clusters),
orthogonalize_cluster, [0, eigenvectors])
return eigenvectors
def continue_iteration(i, _, nrm_v, nrm_v_old):
max_it = 5 # Taken from LAPACK xSTEIN.
min_norm_growth = 0.1
norm_growth_factor = constant_op.constant(
1 + min_norm_growth, dtype=nrm_v.dtype)
# We stop the inverse iteration when we reach the maximum number of
# iterations or the norm growth is less than 10%.
return math_ops.logical_and(
math_ops.less(i, max_it),
math_ops.reduce_any(
math_ops.greater_equal(
math_ops.real(nrm_v),
math_ops.real(norm_growth_factor * nrm_v_old))))
def inverse_iteration_step(i, v, nrm_v, nrm_v_old):
v = tridiagonal_solve(
diags,
v,
diagonals_format='sequence',
partial_pivoting=True,
perturb_singular=True)
nrm_v_old = nrm_v
nrm_v = norm(v, axis=1)
v = v / nrm_v[:, array_ops.newaxis]
v = orthogonalize_close_eigenvectors(v)
return i + 1, v, nrm_v, nrm_v_old
_, v, nrm_v, _ = control_flow_ops.while_loop(continue_iteration,
inverse_iteration_step,
[0, v0, nrm_v, zero_nrm])
return transpose(v)
alpha = ops.convert_to_tensor(alpha, name='alpha')
n = alpha.shape[0]
if n <= 1:
return math_ops.real(alpha)
beta = ops.convert_to_tensor(beta, name='beta')
if alpha.dtype != beta.dtype:
raise ValueError("'alpha' and 'beta' must have the same type.")
eigvals = _compute_eigenvalues(alpha, beta)
if eigvals_only:
return eigvals
eigvectors = _compute_eigenvectors(alpha, beta, eigvals)
return eigvals, eigvectors
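# Selection sketch (an illustrative addition): `select='i'` restricts the
# computation to an index range of the sorted spectrum, here the two smallest
# eigenvalues of the 4x4 second-difference matrix.
def _eigh_tridiagonal_example():
  alpha = constant_op.constant([2.0, 2.0, 2.0, 2.0])
  beta = constant_op.constant([-1.0, -1.0, -1.0])
  return eigh_tridiagonal(
      alpha, beta, select='i', select_range=[0, 1])  # ==> approx [0.38, 1.38]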
| sarvex/tensorflow | tensorflow/python/ops/linalg/linalg_impl.py | Python | apache-2.0 | 65,679 | ["Gaussian"] | 63838a28ea72c76afada1986c5ba4810bc05a8be10d32b83f6b47f814776a94c |
from sandbox.rocky.tf.q_functions.base import QFunction
import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
import numpy as np
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.policies.base import StochasticPolicy
class NAFMLPQFunction(QFunction, LayersPowered, Serializable):
def __init__(
self,
env_spec,
name='nafqnet',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
action_merge_layer=0,
output_nonlinearity=None,
hidden_W_init=L.XavierUniformInitializer(),
hidden_b_init=L.ZerosInitializer(),
output_W_init=L.XavierUniformInitializer(),
output_b_init=L.ZerosInitializer(),
bn=False):
Serializable.quick_init(self, locals())
assert not env_spec.action_space.is_discrete
action_dim = env_spec.action_space.flat_dim
self._action_dim = action_dim
self._env_spec = env_spec
n_layers = len(hidden_sizes)
action_merge_layer = \
(action_merge_layer % n_layers + n_layers) % n_layers
with tf.variable_scope(name):
l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")
l_policy_mu = L.InputLayer(shape=(None, action_dim), name="policy_mu")
l_policy_sigma = L.InputLayer(shape=(None, action_dim, action_dim), name="policy_sigma")
l_hidden = l_obs
idx = 0
l_hidden_kwargs = dict(
W=hidden_W_init,
b=hidden_b_init,
nonlinearity=hidden_nonlinearity,
)
l_output_kwargs = dict(
W=output_W_init,
b=output_b_init,
)
while idx < action_merge_layer:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
_idx = idx
_l_hidden = l_hidden
# compute L network
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="L_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
l_L = L.DenseLayer(
l_hidden,num_units=action_dim**2, nonlinearity=None,
name="L_h%d" % (idx + 1), **l_output_kwargs,)
# compute V network
idx = _idx
l_hidden = _l_hidden
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="V_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
l_V = L.DenseLayer(
l_hidden,num_units=1, nonlinearity=None,
name="V_h%d" % (idx + 1), **l_output_kwargs,)
# compute mu network
idx = _idx
l_hidden = _l_hidden
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="mu_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
if bn: l_hidden = L.batch_norm(l_hidden)
l_mu = L.DenseLayer(
l_hidden,num_units=action_dim, nonlinearity=tf.nn.tanh,
name="mu_h%d" % (idx + 1), **l_output_kwargs,)
L_var, V_var, mu_var = L.get_output([l_L, l_V, l_mu], deterministic=True)
V_var = tf.reshape(V_var, (-1,))
# compute advantage
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_A_sym(P_var, mu_var, l_action.input_var)
# compute Q
Q_var = A_var + V_var
# compute expected Q under Gaussian policy
e_A_var = self.get_e_A_sym(P_var, mu_var, l_policy_mu.input_var, l_policy_sigma.input_var)
e_Q_var = e_A_var + V_var
self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], Q_var)
self._f_e_qval = tensor_utils.compile_function([l_obs.input_var, l_policy_mu.input_var,
l_policy_sigma.input_var], e_Q_var)
self._L_layer = l_L
self._V_layer = l_V
self._mu_layer = l_mu
self._obs_layer = l_obs
self._action_layer = l_action
self._policy_mu_layer = l_policy_mu
self._policy_sigma_layer = l_policy_sigma
self._output_nonlinearity = output_nonlinearity
self.init_policy()
LayersPowered.__init__(self, [l_L, l_V, l_mu])
def init_policy(self):
pass
def get_L_sym(self, L_vec_var):
L = tf.reshape(L_vec_var, (-1, self._action_dim, self._action_dim))
return tf.matrix_band_part(L, -1, 0) - \
tf.matrix_diag(tf.matrix_diag_part(L)) + \
tf.matrix_diag(tf.exp(tf.matrix_diag_part(L)))
def get_P_sym(self, L_mat_var):
return tf.matmul(L_mat_var, tf.matrix_transpose(L_mat_var))
def get_e_A_sym(self, P_var, mu_var, policy_mu_var, policy_sigma_var):
e_A_var1 = self.get_A_sym(P_var, mu_var, policy_mu_var)
e_A_var2 = - 0.5 * tf.reduce_sum(tf.matrix_diag_part(
tf.matmul(P_var, policy_sigma_var)), 1)
#e_A_var2 = - 0.5 * tf.trace(tf.matmul(P_var, policy_sigma_var))
return e_A_var1 + e_A_var2
def get_A_sym(self, P_var, mu_var, action_var):
delta_var = action_var - mu_var
delta_mat_var = tf.reshape(delta_var, (-1, self._action_dim, 1))
P_delta_var = tf.squeeze(tf.matmul(P_var, delta_mat_var),[2])
return -0.5 * tf.reduce_sum(delta_var * P_delta_var, 1)
def get_qval(self, observations, actions):
qvals = self._f_qval(observations, actions)
return qvals
def get_output_sym(self, obs_var, **kwargs):
L_var, V_var, mu_var = L.get_output(
[self._L_layer, self._V_layer, self._mu_layer],
{self._obs_layer: obs_var},
**kwargs
)
V_var = tf.reshape(V_var, (-1,))
return L_var, V_var, mu_var
def _get_qval_sym(self, obs_var, action_var, **kwargs):
L_var, V_var, mu_var = self.get_output_sym(obs_var, **kwargs)
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_A_sym(P_var, mu_var, action_var)
Q_var = A_var + V_var
return Q_var, A_var, V_var
def get_qval_sym(self, obs_var, action_var, **kwargs):
return self._get_qval_sym(obs_var, action_var, **kwargs)[0]
def get_e_qval(self, observations, policy):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info(observations)
mu, log_std = agent_info['mean'], agent_info["log_std"]
std = np.array([np.diag(x) for x in np.exp(log_std)], dtype=log_std.dtype)
qvals = self._f_e_qval(observations, mu, std)
else:
actions, _ = policy.get_actions(observations)
qvals = self.get_qval(observations, actions)
return qvals
def get_e_qval_sym(self, obs_var, policy, **kwargs):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info_sym(obs_var)
mu, log_std = agent_info['mean'], agent_info["log_std"]
std = tf.matrix_diag(tf.exp(log_std))
L_var, V_var, mu_var = self.get_output_sym(obs_var, **kwargs)
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_e_A_sym(P_var, mu_var, mu, std)
qvals = A_var + V_var
else:
mu = policy.get_action_sym(obs_var)
qvals = self.get_qval_sym(obs_var, mu, **kwargs)
return qvals
def get_cv_sym(self, obs_var, action_var, policy, **kwargs):
#_, avals, _ = self._get_qval_sym(obs_var, action_var, **kwargs)
qvals = self.get_qval_sym(obs_var, action_var, **kwargs)
e_qvals = self.get_e_qval_sym(obs_var, policy, **kwargs)
avals = qvals - e_qvals
return avals
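# --- Illustrative check (editor's sketch, not part of the original class) ---
# get_L_sym/get_P_sym/get_A_sym above implement the NAF decomposition
# Q(s, a) = V(s) + A(s, a), with A(s, a) = -0.5 (a - mu)^T P (a - mu) and
# P = L L^T, where L is lower triangular with an exponentiated diagonal so
# that P is always positive definite. The NumPy mirror below shows why the
# advantage peaks exactly at a = mu.
if __name__ == "__main__":
    import numpy as np

    def naf_advantage_np(L_vec, mu, action):
        dim = mu.shape[0]
        L_raw = L_vec.reshape(dim, dim)
        # strictly lower part + exp() of the diagonal, as in get_L_sym
        L_mat = np.tril(L_raw, k=-1) + np.diag(np.exp(np.diag(L_raw)))
        P = L_mat.dot(L_mat.T)  # positive definite by construction
        delta = action - mu
        return -0.5 * delta.dot(P).dot(delta)

    rng = np.random.RandomState(0)
    mu = rng.randn(3)
    L_vec = rng.randn(9)
    assert naf_advantage_np(L_vec, mu, mu) == 0.0       # maximum at a = mu
    assert naf_advantage_np(L_vec, mu, mu + 0.1) < 0.0  # negative elsewhere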
|
brain-research/mirage-rl-qprop
|
sandbox/rocky/tf/q_functions/naf_mlp_q_function.py
|
Python
|
mit
| 8,719
|
[
"Gaussian"
] |
0f260f4aa8c861d5198884b4c3fd151843c0d93990c0434c458f480645a3bf31
|
stageDefaults = {
'distributed': True,
'walltime': "08:00:00",
'memInGB': 8,
'queue': "batch",
'modules': [
"bwa-gcc/0.5.9",
"samtools-gcc/0.1.16",
"picard/1.53",
"python-gcc/2.6.4",
"R-gcc/2.12.0",
"gatk/1.6-7"
]
}
stages = {
"fastqc": {
"command": "fastqc --quiet -o %outdir %seq",
"walltime": "10:00:00",
'modules': [ "fastqc/0.10.1" ]
},
'alignBWA': {
'command': "bwa aln -t 8 %encodingflag %ref %seq > %out",
'walltime': "18:00:00",
'queue': 'smp',
'memInGB': 23
},
'alignToSamSE': {
'command': "bwa samse %ref %meta %align %seq > %out"
},
'alignToSamPE': {
'command': "bwa sampe %ref %meta %align1 %align2 %seq1 %seq2 > %out"
},
'samToSortedBam': {
'command': "./SortSam 6 VALIDATION_STRINGENCY=LENIENT INPUT=%seq OUTPUT=%out SORT_ORDER=coordinate",
'walltime': "32:00:00",
},
'mergeBams': {
'command': "./PicardMerge 6 %baminputs USE_THREADING=true VALIDATION_STRINGENCY=LENIENT AS=true OUTPUT=%out",
'walltime': "72:00:00"
},
'indexBam': {
'command': "samtools index %bam"
},
'flagstat': {
'command': "samtools flagstat %bam > %out",
'walltime': "00:10:00"
},
'igvcount': {
'command': "igvtools count %bam %out hg19",
'modules': [ "igvtools/1.5.15" ]
},
'indexVCF': {
'command': "./vcftools_prepare.sh %vcf",
'modules': [ "tabix/0.2.5" ]
},
'realignIntervals': {
# Hard-coded to take 2 known indels files right now
'command': "./GenomeAnalysisTK 1 -T RealignerTargetCreator -R %ref -I %bam --known %indels_goldstandard --known %indels_1000G -log %log -o %out",
'memInGB': 23,
'walltime': "7:00:00:00"
},
'realign': {
'command': "./GenomeAnalysisTK 22 -T IndelRealigner -R %ref -I %bam -targetIntervals %intervals -log %log -o %out",
'memInGB': 23,
'walltime': "7:00:00:00"
},
'dedup': {
'command': "./MarkDuplicates 6 INPUT=%bam REMOVE_DUPLICATES=true VALIDATION_STRINGENCY=LENIENT AS=true METRICS_FILE=%log OUTPUT=%out",
'walltime': '7:00:00:00'
},
'baseQualRecalCount': {
'command': "./GenomeAnalysisTK 12 -T CountCovariates -I %bam -R %ref --knownSites %dbsnp -nt 8 -l INFO -cov ReadGroupCovariate -cov QualityScoreCovariate -cov CycleCovariate -cov DinucCovariate -log %log -recalFile %out",
'queue': 'smp',
'memInGB': 23,
'walltime': "3:00:00:00"
},
'baseQualRecalTabulate': {
'command': "./GenomeAnalysisTK 4 -T TableRecalibration -I %bam -R %ref -recalFile %csvfile -l INFO -log %log -o %out",
'walltime': "3:00:00:00"
},
'callSNPs': {
'command': "./GenomeAnalysisTK 12 -T UnifiedGenotyper -nt 8 -R %ref -I %bam --dbsnp %dbsnp -stand_call_conf 50.0 -stand_emit_conf 10.0 -dcov 1600 -l INFO -A AlleleBalance -A DepthOfCoverage -A FisherStrand -glm SNP -log %log -o %out",
'queue': 'smp',
'memInGB': 23,
'walltime': "24:00:00"
},
'callIndels': {
'command': "./GenomeAnalysisTK 12 -T UnifiedGenotyper -nt 8 -R %ref -I %bam --dbsnp %dbsnp -stand_call_conf 50.0 -stand_emit_conf 10.0 -dcov 1600 -l INFO -A AlleleBalance -A DepthOfCoverage -A FisherStrand -glm INDEL -log %log -o %out",
'queue': 'smp',
'memInGB': 23,
'walltime': "24:00:00"
},
'filterSNPs': {
# Very minimal filters based on GATK recommendations. VQSR is preferable if possible.
'command': "./GenomeAnalysisTK 4 -T VariantFiltration -R %ref --variant %vcf --filterExpression 'QD < 2.0 || MQ < 40.0 || FS > 60.0 || HaplotypeScore > 13.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0' --filterName 'GATK_MINIMAL_FILTER' -log %log -o %out",
},
'filterIndels': {
# Very minimal filters based on GATK recommendations. VQSR is preferable if possible.
# If you have 10 or more samples GATK also recommends the filter InbreedingCoeff < -0.8
'command': "./GenomeAnalysisTK 4 -T VariantFiltration -R %ref --variant %vcf --filterExpression 'QD < 2.0 || ReadPosRankSum < -20.0 || FS > 200.0' --filterName 'GATK_MINIMAL_FILTER' -log %log -o %out",
},
'annotateEnsembl': {
# This command as written assumes that VEP and its cache have been
# downloaded in respective locations
# ./variant_effect_predictor_2.5
# ./variant_effect_predictor_2.5/vep_cache
'command': "perl variant_effect_predictor_2.5/variant_effect_predictor.pl --cache --dir variant_effect_predictor_2.5/vep_cache -i %vcf --vcf -o %out -species human --canonical --gene --protein --sift=b --polyphen=b > %log",
'modules': [ "perl/5.10.1", "ensembl/67" ]
},
'depthOfCoverage': {
'command': "./GenomeAnalysisTK 4 -T DepthOfCoverage -R %ref -I %bam -omitBaseOutput -ct 1 -ct 10 -ct 20 -ct 30 -o %out",
},
'collateReadcounts': {
'command': 'python count_flagstat_wgs.py %dir %outdir',
'walltime': "00:10:00"
}
}
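# --- Usage sketch (editor's addition; the runner that consumes this config
# is hypothetical here) --- A pipeline engine would typically resolve a stage
# by layering its keys over stageDefaults and then substituting the
# %placeholders in 'command'. Keys are substituted longest-first so that
# '%outdir' is not clobbered by '%out'.
def resolve_stage(name, substitutions):
    cfg = dict(stageDefaults)   # start from the defaults
    cfg.update(stages[name])    # per-stage keys override
    command = cfg['command']
    for key in sorted(substitutions, key=len, reverse=True):
        command = command.replace('%' + key, str(substitutions[key]))
    cfg['command'] = command
    return cfg

# Example:
# resolve_stage('flagstat', {'bam': 's1.bam', 'out': 's1.flagstat.txt'})
# -> command "samtools flagstat s1.bam > s1.flagstat.txt",
#    walltime "00:10:00" (stage override), memInGB 8 (inherited default).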
|
claresloggett/test-repo-varcall
|
pipeline_stages_config.py
|
Python
|
mit
| 5,140
|
[
"BWA"
] |
571d8c7c55cf6d1988fdc29eb310a405a7a9ab4c4830036f1feccf6e8a4dd378
|
#!/usr/bin/env python
# encoding: utf-8
import urllib2
import time
import os
PROXIES_LIST="proxies.lst"
def getHttpProxyHandlers():
with open( PROXIES_LIST ) as f:
for line in f:
yield urllib2.ProxyHandler( {"http":line.strip()} )
def getOpeners():
for httpProxyHandler in getHttpProxyHandlers():
opener = urllib2.build_opener(httpProxyHandler)
opener.addheaders= [("User-agent","Mozilla/5.0 (iPhone; CPU iPhone OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3"),
("Referer","http://jkpc.gl-inc.jp/?page_id=261&event_id=10003&card_id=1030"),
("X-Requested-With","XMLHttpRequest"),
("Content-Type","application/x-www-form-urlencoded; charset=UTF-8")
]
yield opener
def getter(opener, url, timeout):
return opener.open(url,timeout=timeout)
def poster(opener, url, body, timeout):
return opener.open(url, data=body, timeout=timeout)
import threading
for opener in getOpeners():
print opener
url="http://www.firefly.kutc.kansai-u.ac.jp/~k843966/"
try:
getter(opener, url,timeout=10)
except:
pass
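# --- Editor's sketch (hypothetical usage, not in the original script) ---
# poster() mirrors getter() for form submissions; in Python 2 the body would
# be built with urllib.urlencode, e.g.:
#
# import urllib
# body = urllib.urlencode({"event_id": "10003", "card_id": "1030"})
# for opener in getOpeners():
#     try:
#         response = poster(opener, url, body, timeout=10)
#         print response.getcode()
#     except Exception:
#         pass  # skip proxies that are down or too slow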
|
syakesaba/selenium-xvfb
|
proxyGen/getter.py
|
Python
|
mit
| 1,238
|
[
"Firefly"
] |
d691ebad40d6ee1ffdaf23f95a8ad60cdad9d3a653deee8260129f54c5944181
|
import pymongo
import bson
import unittest
import mock
import cPickle
from datetime import datetime
from mongoengine import *
from mongoengine.base import _document_registry
from mongoengine.connection import _get_db, connect
import mongoengine.connection
from mock import MagicMock, Mock, call
mongoengine.connection.set_default_db("test")
# has to be top level for pickling
class Citizen(Document):
age = mongoengine.fields.IntField()
class DocumentTest(unittest.TestCase):
def setUp(self):
connect()
self.db = _get_db()
class Person(Document):
name = StringField()
age = IntField()
uid = ObjectIdField()
friends = ListField(StringField())
self.Person = Person
def tearDown(self):
self.Person.drop_collection()
_document_registry.clear()
Citizen.drop_collection()
def test_bool(self):
class EmptyDoc(EmbeddedDocument):
pass
empty_doc = EmptyDoc()
self.assertTrue(bool(empty_doc))
nonempty_doc = self.Person(name='Adam')
self.assertTrue(bool(nonempty_doc))
def test_drop_collection(self):
"""Ensure that the collection may be dropped from the database.
"""
self.Person(name='Test').save()
collection = self.Person._get_collection_name()
self.assertTrue(collection in self.db.collection_names())
self.Person.drop_collection()
self.assertFalse(collection in self.db.collection_names())
def test_definition(self):
"""Ensure that document may be defined using fields.
"""
name_field = StringField()
age_field = IntField()
_document_registry.clear()
class Person(Document):
name = name_field
age = age_field
non_field = True
self.assertEqual(Person._fields['name'], name_field)
self.assertEqual(Person._fields['age'], age_field)
self.assertFalse('non_field' in Person._fields)
self.assertTrue('id' in Person._fields)
# Test iteration over fields
fields = list(Person())
self.assertTrue('name' in fields and 'age' in fields)
# Ensure Document isn't treated like an actual document
self.assertFalse(hasattr(Document, '_fields'))
def test_get_superclasses(self):
"""Ensure that the correct list of superclasses is assembled.
"""
class Animal(Document): pass
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
mammal_superclasses = {'Animal': Animal}
self.assertEqual(Mammal._superclasses, mammal_superclasses)
dog_superclasses = {
'Animal': Animal,
'Animal.Mammal': Mammal,
}
self.assertEqual(Dog._superclasses, dog_superclasses)
def test_get_subclasses(self):
"""Ensure that the correct list of subclasses is retrieved by the
_get_subclasses method.
"""
class Animal(Document): pass
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
mammal_subclasses = {
'Animal.Mammal.Dog': Dog,
'Animal.Mammal.Human': Human
}
self.assertEqual(Mammal._get_subclasses(), mammal_subclasses)
animal_subclasses = {
'Animal.Fish': Fish,
'Animal.Mammal': Mammal,
'Animal.Mammal.Dog': Dog,
'Animal.Mammal.Human': Human
}
self.assertEqual(Animal._get_subclasses(), animal_subclasses)
def test_external_super_and_sub_classes(self):
"""Ensure that the correct list of sub and super classes is assembled.
when importing part of the model
"""
class Base(Document): pass
class Animal(Base): pass
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
mammal_superclasses = {'Base': Base, 'Base.Animal': Animal}
self.assertEqual(Mammal._superclasses, mammal_superclasses)
dog_superclasses = {
'Base': Base,
'Base.Animal': Animal,
'Base.Animal.Mammal': Mammal,
}
self.assertEqual(Dog._superclasses, dog_superclasses)
animal_subclasses = {
'Base.Animal.Fish': Fish,
'Base.Animal.Mammal': Mammal,
'Base.Animal.Mammal.Dog': Dog,
'Base.Animal.Mammal.Human': Human
}
self.assertEqual(Animal._get_subclasses(), animal_subclasses)
mammal_subclasses = {
'Base.Animal.Mammal.Dog': Dog,
'Base.Animal.Mammal.Human': Human
}
self.assertEqual(Mammal._get_subclasses(), mammal_subclasses)
Base.drop_collection()
h = Human()
h.save()
self.assertEquals(Human.count({}), 1)
self.assertEquals(Mammal.count({}), 1)
self.assertEquals(Animal.count({}), 1)
self.assertEquals(Base.count({}), 1)
Base.drop_collection()
def test_polymorphic_queries(self):
"""Ensure that the correct subclasses are returned from a query"""
class Animal(Document): pass
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
Animal.drop_collection()
Animal().save()
Fish().save()
Mammal().save()
Human().save()
Dog().save()
classes = [obj.__class__ for obj in Animal.objects]
self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog])
classes = [obj.__class__ for obj in Mammal.objects]
self.assertEqual(classes, [Mammal, Human, Dog])
classes = [obj.__class__ for obj in Human.objects]
self.assertEqual(classes, [Human])
Animal.drop_collection()
def test_reference_inheritance(self):
class Stats(Document):
created = DateTimeField(default=datetime.now)
meta = {'allow_inheritance': False}
class CompareStats(Document):
generated = DateTimeField(default=datetime.now)
stats = ListField(ReferenceField(Stats))
Stats.drop_collection()
CompareStats.drop_collection()
list_stats = []
for i in xrange(10):
s = Stats()
s.save()
list_stats.append(s)
cmp_stats = CompareStats(stats=list_stats)
cmp_stats.save()
self.assertEqual(list_stats, CompareStats.find_one({}).stats)
def test_inheritance(self):
"""Ensure that document may inherit fields from a superclass document.
"""
class Employee(self.Person):
salary = IntField()
self.assertTrue('name' in Employee._fields)
self.assertTrue('salary' in Employee._fields)
self.assertEqual(Employee._get_collection_name(),
self.Person._get_collection_name())
# Ensure that MRO error is not raised
class A(Document): pass
class B(A): pass
class C(B): pass
def test_allow_inheritance(self):
"""Ensure that inheritance may be disabled on simple classes and that
_cls and _types will not be used.
"""
class Animal(Document):
name = StringField()
meta = {'allow_inheritance': False}
Animal.drop_collection()
def create_dog_class():
class Dog(Animal):
pass
self.assertRaises(ValueError, create_dog_class)
# Check that _cls etc aren't present on simple documents
dog = Animal(name='dog')
dog.save()
collection = self.db[Animal._get_collection_name()]
obj = collection.find_one()
self.assertFalse('_cls' in obj)
self.assertFalse('_types' in obj)
Animal.drop_collection()
def create_employee_class():
class Employee(self.Person):
meta = {'allow_inheritance': False}
self.assertRaises(ValueError, create_employee_class)
# Test the same for embedded documents
class Comment(EmbeddedDocument):
content = StringField()
meta = {'allow_inheritance': False}
def create_special_comment():
class SpecialComment(Comment):
pass
self.assertRaises(ValueError, create_special_comment)
comment = Comment(content='test')
self.assertFalse('_cls' in comment.to_mongo())
self.assertFalse('_types' in comment.to_mongo())
def test_collection_name(self):
"""Ensure that a collection with a specified name may be used.
"""
collection = 'personCollTest'
if collection in self.db.collection_names():
self.db.drop_collection(collection)
_document_registry.clear()
class Person(Document):
name = StringField()
meta = {'collection': collection}
user = Person(name="Test User")
user.save()
self.assertTrue(collection in self.db.collection_names())
user_obj = self.db[collection].find_one()
self.assertEqual(user_obj['name'], "Test User")
user_obj = Person.objects[0]
self.assertEqual(user_obj.name, "Test User")
Person.drop_collection()
self.assertFalse(collection in self.db.collection_names())
def test_collection_name_and_primary(self):
"""Ensure that a collection with a specified name may be used.
"""
_document_registry.clear()
class Person(Document):
name = StringField(primary_key=True)
meta = {'collection': 'app'}
user = Person(name="Test User")
user.save()
user_obj = Person.objects[0]
self.assertEqual(user_obj.name, "Test User")
Person.drop_collection()
def test_creation(self):
"""Ensure that document may be created using keyword arguments.
"""
person = self.Person(name="Test User", age=30)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 30)
def test_reload(self):
"""Ensure that attributes may be reloaded.
"""
person = self.Person(name="Test User", age=20)
person.save()
person_obj = self.Person.find_one({})
person_obj.name = "Mr Test User"
person_obj.age = 21
person_obj.save()
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 20)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
def test_dictionary_access(self):
"""Ensure that dictionary-style field access works properly.
"""
person = self.Person(name='Test User', age=30)
self.assertEquals(person['name'], 'Test User')
self.assertRaises(KeyError, person.__getitem__, 'salary')
self.assertRaises(KeyError, person.__setitem__, 'salary', 50)
person['name'] = 'Another User'
self.assertEquals(person['name'], 'Another User')
# Length = length(assigned fields + id)
self.assertEquals(len(person), 5)
self.assertTrue('age' in person)
person.age = None
self.assertFalse('age' in person)
self.assertFalse('nationality' in person)
def test_embedded_document(self):
"""Ensure that embedded documents are set up correctly.
"""
class Comment(EmbeddedDocument):
content = StringField()
self.assertTrue('content' in Comment._fields)
self.assertFalse('id' in Comment._fields)
self.assertFalse('collection' in Comment._meta)
def test_embedded_document_validation(self):
"""Ensure that embedded documents may be validated.
"""
class Comment(EmbeddedDocument):
date = DateTimeField()
content = StringField(required=True)
comment = Comment()
self.assertRaises(ValidationError, comment.validate)
comment.content = 'test'
comment.validate()
comment.date = 4
self.assertRaises(ValidationError, comment.validate)
comment.date = datetime.now()
comment.validate()
def test_save(self):
"""Ensure that a document may be saved in the database.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(person_obj['name'], 'Test User')
self.assertEqual(person_obj['age'], 30)
self.assertEqual(person_obj['_id'], person.id)
# Test skipping validation on save
class Recipient(Document):
email = EmailField(required=True)
recipient = Recipient(email='root@localhost')
self.assertRaises(ValidationError, recipient.save)
try:
recipient.save(validate=False)
except ValidationError:
self.fail()
def test_save_to_a_value_that_equates_to_false(self):
class Thing(EmbeddedDocument):
count = IntField()
class User(Document):
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
user = User(thing=Thing(count=1))
user.save()
user.reload()
user.thing.count = 0
user.save()
user.reload()
self.assertEquals(user.thing.count, 0)
def test_save_max_recursion_not_hit(self):
_document_registry.clear()
class Person(Document):
name = StringField()
parent = ReferenceField('self')
friend = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p1.friend = p2
p1.save()
# Confirm can save and it resets the changed fields without hitting
# max recursion error
p0 = Person.find_one({})
p0.name = 'wpjunior'
p0.save()
def test_update(self):
"""Ensure that an existing document is updated instead of be overwritten.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Create same person object, with same id, without age
same_person = self.Person(name='Test')
same_person.id = person.id
same_person.save()
# Confirm only one object
self.assertEquals(self.Person.count({}), 1)
# reload
person.reload()
same_person.reload()
# Confirm the same
self.assertEqual(person, same_person)
self.assertEqual(person.name, same_person.name)
self.assertEqual(person.age, same_person.age)
# Confirm the saved values
self.assertEqual(person.name, 'Test')
self.assertIsNone(person.age)
def test_document_update(self):
person = self.Person(name='dcrosta',
id=bson.ObjectId(), uid=bson.ObjectId())
resp = person.set(name='Dan Crosta')
self.assertEquals(resp['n'], 0)
author = self.Person(name='dcrosta')
author.save()
author.set(name='Dan Crosta')
author.reload()
p1 = self.Person.find_one({})
self.assertEquals(p1.name, author.name)
p1.set(uid=None)
p1.reload()
self.assertEquals(p1.uid, None)
def unset_primary_key():
person = self.Person.find_one({})
person.set(id=None)
def update_no_value_raises():
person = self.Person.find_one({})
person.set()
self.assertRaises(pymongo.errors.OperationFailure, unset_primary_key)
self.assertRaises(pymongo.errors.OperationFailure, update_no_value_raises)
def test_addtoset_on_null_list(self):
person = self.Person(
name = 'Bruce Banner',
id = bson.ObjectId(),
uid = bson.ObjectId(),
friends = None
)
person.save()
person.update_one({'$addToSet' : {'friends' : {'$each' : ['Bob','Fan']}}})
person.reload()
self.assertTrue(len(person.friends) == 2)
self.assertTrue('Bob' in person.friends)
self.assertTrue('Fan' in person.friends)
def test_non_existant_inc(self):
person = self.Person(name='dcrosta',
id=bson.ObjectId(), uid=bson.ObjectId())
person.save()
person.inc(age=5)
person.reload()
self.assertEquals(person.age, 5)
person.inc(age=5)
person.reload()
self.assertEquals(person.age, 10)
def test_embedded_update(self):
"""
Test update on `EmbeddedDocumentField` fields
"""
class Page(EmbeddedDocument):
log_message = StringField(required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.find_one({})
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.find_one({})
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_embedded_update_db_field(self):
"""
Test update on `EmbeddedDocumentField` fields when db_field is other
than default.
"""
class Page(EmbeddedDocument):
log_message = StringField(db_field="page_log_message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.find_one({})
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.find_one({})
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_delete(self):
"""Ensure that document may be deleted using the delete method.
"""
person = self.Person(name="Test User", age=30)
person.save()
self.assertEqual(len(self.Person.objects), 1)
person.delete()
self.assertEqual(len(self.Person.objects), 0)
def test_save_custom_id(self):
"""Ensure that a document may be saved with a custom _id.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
id='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_custom_pk(self):
"""Ensure that a document may be saved with a custom _id using pk alias.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
pk='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_list(self):
"""Ensure that a list field may be properly saved.
"""
class Comment(EmbeddedDocument):
content = StringField()
class BlogPost(Document):
content = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
tags = ListField(StringField())
BlogPost.drop_collection()
post = BlogPost(content='Went for a walk today...')
post.tags = tags = ['fun', 'leisure']
comments = [Comment(content='Good for you'), Comment(content='Yay.')]
post.comments = comments
post.save()
collection = self.db[BlogPost._get_collection_name()]
post_obj = collection.find_one()
self.assertEqual(post_obj['tags'], tags)
for comment_obj, comment in zip(post_obj['comments'], comments):
self.assertEqual(comment_obj['content'], comment['content'])
BlogPost.drop_collection()
def test_list_search_by_embedded(self):
class User(Document):
username = StringField(required=True)
meta = {'allow_inheritance': False}
class Comment(EmbeddedDocument):
comment = StringField()
user = ReferenceField(User,
required=True)
meta = {'allow_inheritance': False}
class Page(Document):
comments = ListField(EmbeddedDocumentField(Comment))
meta = {'allow_inheritance': False}
User.drop_collection()
Page.drop_collection()
u1 = User(username="wilson")
u1.save()
u2 = User(username="rozza")
u2.save()
u3 = User(username="hmarr")
u3.save()
p1 = Page(comments = [Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world"),
Comment(user=u3, comment="Ping Pong"),
Comment(user=u1, comment="I like a beer")])
p1.save()
p2 = Page(comments = [Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world")])
p2.save()
p3 = Page(comments = [Comment(user=u3, comment="Its very good")])
p3.save()
p4 = Page(comments = [Comment(user=u2, comment="Heavy Metal song")])
p4.save()
self.assertEqual([p1, p2], list(Page.objects.filter(comments__user=u1)))
self.assertEqual([p1, p2, p4], list(Page.objects.filter(comments__user=u2)))
self.assertEqual([p1, p3], list(Page.objects.filter(comments__user=u3)))
def test_save_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
employee_obj = collection.find_one({'name': 'Test Employee'})
self.assertEqual(employee_obj['name'], 'Test Employee')
self.assertEqual(employee_obj['age'], 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(employee_obj['details']['position'], 'Developer')
def test_updating_an_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Test updating an embedded document
promoted_employee = Employee.objects.get(name='Test Employee')
promoted_employee.details.position = 'Senior Developer'
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.name, 'Test Employee')
self.assertEqual(promoted_employee.age, 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(promoted_employee.details.position, 'Senior Developer')
# Test removal
promoted_employee.details = None
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.details, None)
def test_save_reference(self):
"""Ensure that a document reference field may be saved in the database.
"""
class BlogPost(Document):
meta = {'collection': 'blogpost_1'}
content = StringField()
author = ReferenceField(self.Person)
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV today... how exciting.')
# Should only reference author when saving
post.author = author
post.save()
post_obj = BlogPost.find_one({})
# Test laziness
self.assertTrue(isinstance(post_obj._lazy_data['author'],
bson.dbref.DBRef))
self.assertTrue(isinstance(post_obj.author, self.Person))
self.assertEqual(post_obj.author.name, 'Test User')
# Ensure that the dereferenced object may be changed and saved
post_obj.author.age = 25
post_obj.author.save()
author = list(self.Person.objects(name='Test User'))[-1]
self.assertEqual(author.age, 25)
BlogPost.drop_collection()
def subclasses_and_unique_keys_works(self):
class A(Document):
pass
class B(A):
foo = BooleanField(unique=True)
A.drop_collection()
B.drop_collection()
A().save()
A().save()
B(foo=True).save()
self.assertEquals(A.count({}), 2)
self.assertEquals(B.count({}), 1)
A.drop_collection()
B.drop_collection()
def test_document_hash(self):
"""Test document in list, dict, set
"""
class User(Document):
pass
class BlogPost(Document):
pass
# Clear old data
User.drop_collection()
BlogPost.drop_collection()
u1 = User.objects.create()
u2 = User.objects.create()
u3 = User.objects.create()
u4 = User() # New object
b1 = BlogPost.objects.create()
b2 = BlogPost.objects.create()
# in List
all_user_list = list(User.objects.all())
self.assertTrue(u1 in all_user_list)
self.assertTrue(u2 in all_user_list)
self.assertTrue(u3 in all_user_list)
self.assertFalse(u4 in all_user_list) # New object
self.assertFalse(b1 in all_user_list) # Other object
self.assertFalse(b2 in all_user_list) # Other object
# in Dict
all_user_dic = {}
for u in User.objects.all():
all_user_dic[u] = "OK"
self.assertEqual(all_user_dic.get(u1, False), "OK" )
self.assertEqual(all_user_dic.get(u2, False), "OK" )
self.assertEqual(all_user_dic.get(u3, False), "OK" )
self.assertEqual(all_user_dic.get(u4, False), False ) # New object
self.assertEqual(all_user_dic.get(b1, False), False ) # Other object
self.assertEqual(all_user_dic.get(b2, False), False ) # Other object
# in Set
all_user_set = set(User.find({}))
self.assertTrue(u1 in all_user_set)
def throw_invalid_document_error(self):
# test handles people trying to upsert
def throw_invalid_document_error():
class Blog(Document):
validate = DictField()
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_write_concern(self):
class ImportantThing(Document):
meta = {'write_concern': 2}
name = StringField()
class MajorityThing(Document):
meta = {'write_concern': 'majority',
'force_insert': True}
name = StringField()
class NormalThing(Document):
name = StringField()
# test save() of ImportantThing gets w=2
with mock.patch.object(ImportantThing._pymongo(), "save") as save_mock:
it = ImportantThing(id=bson.ObjectId())
save_mock.return_value = it.id
it.save()
save_mock.assert_called_with(it.to_mongo(), w=2)
# test insert() of MajorityThing gets w=majority
# note: uses insert() because force_insert is set
with mock.patch.object(MajorityThing._pymongo(), "insert") as insert_mock:
mt = MajorityThing(id=bson.ObjectId())
insert_mock.return_value = mt.id
mt.save()
insert_mock.assert_called_with(mt.to_mongo(), w='majority')
# test NormalThing gets default w=1
with mock.patch.object(NormalThing._pymongo(), "save") as save_mock:
nt = NormalThing(id=bson.ObjectId())
save_mock.return_value = nt.id
nt.save()
save_mock.assert_called_with(nt.to_mongo(), w=1)
# test ImportantThing update gets w=2
with mock.patch.object(ImportantThing._pymongo(), "update") as update_mock:
it.set(name="Adam")
self.assertEquals(update_mock.call_count, 1)
self.assertEquals(update_mock.call_args[1]['w'], 2)
# test MajorityThing update gets w=majority
with mock.patch.object(MajorityThing._pymongo(), "update") as update_mock:
mt.set(name="Adam")
self.assertEquals(update_mock.call_count, 1)
self.assertEquals(update_mock.call_args[1]['w'], "majority")
# test NormalThing update gets w=1
with mock.patch.object(NormalThing._pymongo(), "update") as update_mock:
nt.set(name="Adam")
self.assertEquals(update_mock.call_count, 1)
self.assertEquals(update_mock.call_args[1]['w'], 1)
def test_by_id_key(self):
class UnshardedCollection(Document):
pass
class IdShardedCollection(Document):
meta = {'hash_field': 'id'}
class NonIdShardedCollection(Document):
meta = {'hash_field': 'name'}
name = mongoengine.fields.StringField()
doc_id = bson.ObjectId()
# unsharded and non-ID sharded collections don't have anything injected
self.assertEquals(UnshardedCollection._by_id_key(doc_id),
{'_id': doc_id})
self.assertEquals(NonIdShardedCollection._by_id_key(doc_id),
{'_id': doc_id})
# ID-sharded collections get the hash injected
self.assertEquals(IdShardedCollection._by_id_key(doc_id),
{'_id': doc_id,
'shard_hash': IdShardedCollection._hash(doc_id)})
def test_by_ids_key(self):
class UnshardedCollection(Document):
pass
class IdShardedCollection(Document):
meta = {'hash_field': 'id'}
class NonIdShardedCollection(Document):
meta = {'hash_field': 'name'}
name = mongoengine.fields.StringField()
doc_ids = [bson.ObjectId() for i in xrange(5)]
# unsharded and non-ID sharded collections don't have anything injected
self.assertEquals(UnshardedCollection._by_ids_key(doc_ids),
{'_id': {'$in': doc_ids}})
self.assertEquals(NonIdShardedCollection._by_ids_key(doc_ids),
{'_id': {'$in': doc_ids}})
# ID-sharded collections get the hash injected
doc_hashes = [IdShardedCollection._hash(doc_id) for doc_id in doc_ids]
self.assertEquals(IdShardedCollection._by_ids_key(doc_ids),
{'_id': {'$in': doc_ids},
'shard_hash': {'$in': doc_hashes}})
# unsharded and non-ID sharded collections don't have anything injected
self.assertEquals(UnshardedCollection._by_ids_key([]),
{'_id': {'$in': []}})
self.assertEquals(NonIdShardedCollection._by_ids_key([]),
{'_id': {'$in': []}})
# ID-sharded collections get the hash injected
self.assertEquals(IdShardedCollection._by_ids_key([]),
{'_id': {'$in': []},
'shard_hash': {'$in': []}})
def test_can_pickle(self):
person = Citizen(age=20)
person.save()
pickled = cPickle.dumps(person)
restored = cPickle.loads(pickled)
self.assertEqual(person, restored)
self.assertEqual(person.age, restored.age)
def test_find_raw_max_time_ms(self):
cur, _ = Citizen.find_raw({}, max_time_ms=None, limit=1)
self.assertEquals(cur._Cursor__max_time_ms, Citizen.MAX_TIME_MS)
cur, _ = Citizen.find_raw({}, max_time_ms=0, limit=1)
self.assertIsNone(cur._Cursor__max_time_ms)
cur, _ = Citizen.find_raw({}, max_time_ms=-1, limit=1)
self.assertIsNone(cur._Cursor__max_time_ms)
cur, _ = Citizen.find_raw({}, max_time_ms=1000, limit=1)
self.assertEquals(cur._Cursor__max_time_ms, 1000)
def test_max_time_ms_find(self):
col_mock = Mock()
col_mock.name = 'asdf'
doc_mock = MagicMock()
doc_mock.__iter__.return_value = ['a','b']
cur_mock = Mock()
cur_mock.collection = col_mock
cur_mock.next = MagicMock(side_effect=[doc_mock])
find_raw = MagicMock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
Citizen.find({}, max_time_ms=None)
Citizen.find({}, max_time_ms=0)
Citizen.find({}, max_time_ms=-1)
Citizen.find({}, max_time_ms=1000)
a,b,c,d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],None)
self.assertEquals(b[1]['max_time_ms'],0)
self.assertEquals(c[1]['max_time_ms'],-1)
self.assertEquals(d[1]['max_time_ms'],1000)
def test_max_time_ms_find_iter(self):
cur_mock = MagicMock()
cur_mock._iterate_cursor = MagicMock(side_effect=['a'])
find_raw = MagicMock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
Citizen._from_augmented_son = MagicMock(return_value=None)
Citizen.find_iter({}, max_time_ms=None).next()
Citizen.find_iter({}, max_time_ms=0).next()
Citizen.find_iter({}, max_time_ms=-1).next()
Citizen.find_iter({}, max_time_ms=1000).next()
a,b,c,d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],None)
self.assertEquals(b[1]['max_time_ms'],0)
self.assertEquals(c[1]['max_time_ms'],-1)
self.assertEquals(d[1]['max_time_ms'],1000)
def test_max_time_ms_find_one(self):
find_raw = MagicMock(return_value=(None, None))
Citizen.find_raw = find_raw
Citizen.find_one({}, max_time_ms=None)
Citizen.find_one({}, max_time_ms=0)
Citizen.find_one({}, max_time_ms=-1)
Citizen.find_one({}, max_time_ms=1000)
a,b,c,d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],None)
self.assertEquals(b[1]['max_time_ms'],0)
self.assertEquals(c[1]['max_time_ms'],-1)
self.assertEquals(d[1]['max_time_ms'],1000)
def test_max_time_ms_count(self):
cur_mock = Mock()
cur_mock.count = MagicMock(return_value=1)
find_raw = Mock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
Citizen.count({}, max_time_ms=None)
Citizen.count({}, max_time_ms=0)
Citizen.count({}, max_time_ms=-1)
Citizen.count({}, max_time_ms=1000)
a,b,c,d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],None)
self.assertEquals(b[1]['max_time_ms'],0)
self.assertEquals(c[1]['max_time_ms'],-1)
self.assertEquals(d[1]['max_time_ms'],1000)
def test_max_time_ms_distinct(self):
cur_mock = Mock()
cur_mock.distinct = MagicMock(return_value=1)
find_raw = Mock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
Citizen.distinct({}, '_id', max_time_ms=None)
Citizen.distinct({}, '_id', max_time_ms=0)
Citizen.distinct({}, '_id', max_time_ms=-1)
Citizen.distinct({}, '_id', max_time_ms=1000)
a,b,c,d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],None)
self.assertEquals(b[1]['max_time_ms'],0)
self.assertEquals(c[1]['max_time_ms'],-1)
self.assertEquals(d[1]['max_time_ms'],1000)
def test_timeout_value_find(self):
col_mock = Mock()
col_mock.name = 'asdf'
doc_mock = MagicMock()
doc_mock.__iter__.return_value = ['a','b']
cur_mock = Mock()
cur_mock.collection = col_mock
cur_mock.next = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
find_raw = MagicMock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
self.assertEquals([],Citizen.find({}, timeout_value=[]))
self.assertEquals({},Citizen.find({}, timeout_value={}))
self.assertEquals(1,Citizen.find({}, timeout_value=1))
self.assertEquals('asdf',Citizen.find({}, timeout_value='asdf'))
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find({})
def test_timeout_value_find_one(self):
find_raw = MagicMock(return_value=(MagicMock(),Mock()))
from_augmented_son = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
Citizen._from_augmented_son = from_augmented_son
Citizen.find_raw = find_raw
self.assertEquals([],Citizen.find_one({}, timeout_value=[]))
self.assertEquals({},Citizen.find_one({}, timeout_value={}))
self.assertEquals(1,Citizen.find_one({}, timeout_value=1))
self.assertEquals('asdf',Citizen.find_one({}, timeout_value='asdf'))
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find_one({})
def test_timeout_value_count(self):
cur_mock = Mock()
cur_mock.count = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
find_raw = Mock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
self.assertEquals([],Citizen.count({}, timeout_value=[]))
self.assertEquals({},Citizen.count({}, timeout_value={}))
self.assertEquals(1,Citizen.count({}, timeout_value=1))
self.assertEquals('asdf',Citizen.count({}, timeout_value='asdf'))
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.count({})
def test_timeout_value_distinct(self):
cur_mock = Mock()
cur_mock.distinct = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
find_raw = Mock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
self.assertEquals([],Citizen.distinct({}, '_id', timeout_value=[]))
self.assertEquals({},Citizen.distinct({}, '_id', timeout_value={}))
self.assertEquals(1,Citizen.distinct({}, '_id', timeout_value=1))
self.assertEquals('asdf',Citizen.distinct({}, '_id', timeout_value='asdf'))
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.distinct({}, '_id')
def test_timeout_retry_find(self):
col_mock = Mock()
col_mock.name = 'asdf'
doc_mock = MagicMock()
doc_mock.__iter__.return_value = ['a','b']
cur_mock = Mock()
cur_mock.collection = col_mock
cur_mock.next = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
find_raw = MagicMock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find({}, max_time_ms=None)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find({}, max_time_ms=Citizen.MAX_TIME_MS - 1)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find({}, max_time_ms=Citizen.MAX_TIME_MS)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find({}, max_time_ms=Citizen.MAX_TIME_MS + 1)
# should retry on the first two, should not retry on the last two
self.assertEquals(len(find_raw.call_args_list), 6)
_, a, _, b, c, d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
self.assertEquals(b[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
def test_timeout_retry_find_one(self):
find_raw = MagicMock(return_value=(MagicMock(),Mock()))
from_augmented_son = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
Citizen._from_augmented_son = from_augmented_son
Citizen.find_raw = find_raw
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find_one({}, max_time_ms=None)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find_one({}, max_time_ms=Citizen.MAX_TIME_MS - 1)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find_one({}, max_time_ms=Citizen.MAX_TIME_MS)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.find_one({}, max_time_ms=Citizen.MAX_TIME_MS + 1)
# should retry on the first two, should not retry on the last two
self.assertEquals(len(find_raw.call_args_list), 6)
_, a, _, b, c, d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
self.assertEquals(b[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
def test_timeout_retry_count(self):
cur_mock = Mock()
cur_mock.count = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
find_raw = Mock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.count({}, max_time_ms=None)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.count({}, max_time_ms=Citizen.MAX_TIME_MS - 1)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.count({}, max_time_ms=Citizen.MAX_TIME_MS)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.count({}, max_time_ms=Citizen.MAX_TIME_MS + 1)
# should retry on the first two, should not retry on the last two
self.assertEquals(len(find_raw.call_args_list), 6)
_, a, _, b, c, d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
self.assertEquals(b[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
def test_timeout_retry_distinct(self):
cur_mock = Mock()
cur_mock.distinct = MagicMock(
side_effect=pymongo.errors.ExecutionTimeout('asdf'))
find_raw = Mock(return_value=(cur_mock,Mock()))
Citizen.find_raw = find_raw
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.distinct({}, '_id', max_time_ms=None)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.distinct({}, '_id', max_time_ms=Citizen.MAX_TIME_MS - 1)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.distinct({}, '_id', max_time_ms=Citizen.MAX_TIME_MS)
with self.assertRaises(pymongo.errors.ExecutionTimeout):
Citizen.distinct({},'_id', max_time_ms=Citizen.MAX_TIME_MS + 1)
# should retry on the first two, should not retry on the last two
self.assertEquals(len(find_raw.call_args_list), 6)
_, a, _, b, c, d = find_raw.call_args_list
self.assertEquals(a[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
self.assertEquals(b[1]['max_time_ms'],Citizen.RETRY_MAX_TIME_MS)
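# --- Editor's summary (inferred from the four timeout-retry tests above; the
# helper below is a sketch, not this fork's actual implementation) --- The
# policy the assertions encode: a query that raises ExecutionTimeout is
# retried exactly once, with the larger RETRY_MAX_TIME_MS budget, but only if
# it was issued with the default budget (max_time_ms=None) or an explicit
# budget below MAX_TIME_MS; budgets at or above MAX_TIME_MS fail fast.
#
# def _run_with_retry(query, max_time_ms):
#     try:
#         return query(max_time_ms=max_time_ms)
#     except pymongo.errors.ExecutionTimeout:
#         if max_time_ms is None or max_time_ms < Citizen.MAX_TIME_MS:
#             return query(max_time_ms=Citizen.RETRY_MAX_TIME_MS)
#         raise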
if __name__ == '__main__':
unittest.main()
|
ContextLogic/mongoengine
|
tests/document.py
|
Python
|
mit
| 44,899
|
[
"exciting"
] |
44d68a167af8e23441dd6b4f771df060846e6fbaab0513e70f15e9c96f506160
|
#
# script to calculate tangential and sagittal focusing radii for crystals
# and Rowland condition for gratings and asymmetric crystals
#
import xraylib
import numpy
import scipy.constants as codata
tocm = codata.h*codata.c/codata.e*1e2
#
# define miller indices, distances and photon energy in eV
#
hh = 1
kk = 1
ll = 1
d1 = 3000.0
d2 = 3000.0
alpha = 5.0 * numpy.pi / 180.0 # asymmetry angle in rad
crystal_name = "Si"
photon_energy_ev = 10000.0
#
# get crystal info from xraylib
#
cryst = xraylib.Crystal_GetCrystal(crystal_name)
dspacing = xraylib.Crystal_dSpacing(cryst,hh,kk,ll )
#sin_theta = (tocm/(photon_energy_ev*2.0*dspacing*1e-8));
#rt=2*p*q/(p+q)/sin_theta;
#rs=2*p*q*sin_theta/(p+q);
theta=numpy.arcsin(tocm/(photon_energy_ev*2.0*dspacing*1e-8))
t1 = theta + alpha
t2 = theta - alpha
#calculations
s1 = numpy.sin(t1)
s2 = numpy.sin(t2)
s1_2 = s1*s1
s2_2 = s2*s2
r = s1_2/d1 + s2_2/d2
r = (s1+s2)/r
rs = (s1+s2)/(1.0/d1 + 1.0/d2)
print("Using crystal: %s %d%d%d at E=%.3f eV"%(crystal_name,hh,kk,ll,photon_energy_ev))
print(" dspacing: %f A"%dspacing)
print(" initial p=%.3f, q=%.3f"%(d1,d1))
print("Results: ")
print(" BraggAngle=%.3f deg"%(theta*180/numpy.pi))
print(" IncAngle=%.3f deg, RefAngle=%.3f deg"%(t1*180/numpy.pi,t2*180/numpy.pi))
print(" Rtangential = %f, Rsagittal=%f"%(r,rs))
print(" ROWLAND condition: ")
print(" rowland p=R1=%.3f, q=R2=%.3f"%(r*s1,r*s1))
print(" For p=R1=%.3f the ROWLAND condition is: "%(d1))
print(" q=R2=%.3f, R=%.3f"%(d1*s2/s1,d1/s1))
|
srio/ShadowOui-Tutorial
|
SCRIPTS/script22.py
|
Python
|
mit
| 1,616
|
[
"CRYSTAL"
] |
888d1df258a438c0199af92990b0a0793566f928b599bce205e4b1bbfd504c36
|
from discord.ext import commands
import urllib.request
import json
import math
from .utils import checks
from __main__ import send_cmd_help, settings
from .utils.dataIO import fileIO, dataIO
class Elite():
def __init__(self,bot):
self.bot = bot
self.cmdr_list = dataIO.load_json("data/mod/cmdr.json")
@commands.command()
async def dist(self, *, everything):
"""Indicates distance between two systems. (ex. ?dist HR 1257, HR 1254)"""
try:
sys1, sys2 = everything.split(", ")
sys1plus = sys1.replace(" ", "+")
sys2plus = sys2.replace(" ", "+")
base1 = 'https://www.edsm.net/api-v1/system?sysname=' + sys1plus + '&coords=1'
base2 = 'https://www.edsm.net/api-v1/system?sysname=' + sys2plus + '&coords=1'
readit1 = urllib.request.urlopen(base1)
readit2 = urllib.request.urlopen(base2)
dataedsm1 = json.loads(readit1.read().decode('utf-8'))
dataedsm2 = json.loads(readit2.read().decode('utf-8'))
datax1, datay1, dataz1 = dataedsm1['coords']['x'], dataedsm1['coords']['y'], dataedsm1['coords']['z']
datax2, datay2, dataz2 = dataedsm2['coords']['x'], dataedsm2['coords']['y'], dataedsm2['coords']['z']
x = float(datax2 - datax1)
y = float(datay2 - datay1)
z = float(dataz2 - dataz1)
distance = math.sqrt(x * x + y * y + z * z)
await self.bot.say("The distance between " + sys1 + " and " + sys2 + " is " + "{0:.2F}".format(distance) + "ly.")
readit1.close()
readit2.close()
except TypeError:
await self.bot.say("**ERROR!** Please make sure you spelled the systems correctly. Not every system has been recorded, so not every system will work with this command. ")
except ValueError:
await self.bot.say("**ERROR!** There needs to be a space after the comma!")
@commands.group(pass_context=True)
async def member(self, ctx):
"""Checks if a CMDR is part of Simbad"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@member.command(name="check")
async def _member_check(self, cmdr: str):
cmdr = cmdr.lower()
if cmdr not in self.cmdr_list["members"]:
await self.bot.say("Nope. Make sure you've spelled the CMDR name correctly and that CMDR names with more than one word are put into double quotes."
"(ex. ?member check \"b. horn\") **If your CMDR name isn't added to the command yet, contact an admin.**")
else:
await self.bot.say("Yes. " +cmdr +" is a member of Simbad!")
@member.command(name="add")
@checks.recruiter_or_permissions(manage_messages=True)
async def _member_add(self, cmdra: str):
if cmdra.lower() not in self.cmdr_list["members"]:
self.cmdr_list["members"].append(cmdra.lower())
fileIO("data/mod/cmdr.json","save", self.cmdr_list)
await self.bot.say("CMDR " + cmdra + " has been added to the command.")
@commands.command()
async def ship(self,*,shipname):
"""Provides ship price and coriolis link (ex. ?ship federal corvette)"""
shipname = shipname.lower()
corio = [("adder", "87,810", "https://coriolis.io/outfit/adder/"),
("anaconda", "146,969,450", "https://coriolis.io/outfit/anaconda/"),
("conda", "146,969,450", "https://coriolis.io/outfit/anaconda/"),
("asp explorer", "6,661,150", "https://coriolis.io/outfit/asp/"),
("asp e", "6,661,150", "https://coriolis.io/outfit/asp/"),
("aspe", "6,661,150", "https://coriolis.io/outfit/asp/"),
("asp scout", "3,961,150", "https://coriolis.io/outfit/asp_scout/"),
("cobra mk iii", "349,720", "https://coriolis.io/outfit/cobra_mk_iii/"),
("cobra mkiii", "349,720", "https://coriolis.io/outfit/cobra_mk_iii/"),
("cobra mk iv", "747,660", "https://coriolis.io/outfit/cobra_mk_iv/"),
("cobra mkiv", "747,660", "https://coriolis.io/outfit/cobra_mk_iv/"),
("diamondback explorer", "1,894,760", "https://coriolis.io/outfit/diamondback_explorer/"),
("dbe", "1,894,760", "https://coriolis.io/outfit/diamondback_explorer/"),
("diamondbacke", "1,894,760", "https://coriolis.io/outfit/diamondback_explorer/"),
("diamondback scout", "564,330", "https://coriolis.io/outfit/diamondback/"),
("dbs", "564,330", "https://coriolis.io/outfit/diamondback/"),
("diamondbacks", "564,330", "https://coriolis.io/outfit/diamondback/"),
("eagle", "44,800", "https://coriolis.io/outfit/eagle/"),
("federal assault ship", "19,814,210", "https://coriolis.io/outfit/federal_assault_ship/"),
("fas", "19,814,210", "https://coriolis.io/outfit/federal_assault_ship/"),
("assault ship", "19,814,210", "https://coriolis.io/outfit/federal_assault_ship/"),
("federal corvette", "187,969,450", "https://coriolis.io/outfit/federal_corvette/"),
("corvette", "187,969,450", "https://coriolis.io/outfit/federal_corvette/"),
("federal dropship", "14,314,210", "https://coriolis.io/outfit/federal_dropship/"),
("dropship", "14,314,210", "https://coriolis.io/outfit/federal_dropship/"),
("federal gunship", "35,814,210", "https://coriolis.io/outfit/federal_gunship/"),
("gunship", "35,814,210", "https://coriolis.io/outfit/federal_gunship/"),
("fer de lance", "51,703,780", "https://coriolis.io/outfit/fer_de_lance/"),
("fer-de-lance", "51,703,780", "https://coriolis.io/outfit/fer_de_lance/"),
("fdl", "51,703,780", "https://coriolis.io/outfit/fer_de_lance/"),
("hauler", "52,730", "https://coriolis.io/outfit/hauler/"),
("imperial clipper", "22,296,450", "https://coriolis.io/outfit/imperial_clipper/"),
("iclipper", "22,296,450", "https://coriolis.io/outfit/imperial_clipper/"),
("clipper", "22,296,450", "https://coriolis.io/outfit/imperial_clipper/"),
("imperial courier", "2,542,930", "https://coriolis.io/outfit/imperial_courier/"),
("icourier", "2,542,930", "https://coriolis.io/outfit/imperial_courier/"),
("courier", "2,542,930", "https://coriolis.io/outfit/imperial_courier/"),
("imperial cutter", "208,969,860", "https://coriolis.io/outfit/imperial_cutter/"),
("icutter", "208,969,860", "https://coriolis.io/outfit/imperial_cutter/"),
("cutter", "208,969,860", "https://coriolis.io/outfit/imperial_cutter/"),
("imperial eagle", "110,830", "https://coriolis.io/outfit/imperial_eagle/"),
("ieagle", "110,830", "https://coriolis.io/outfit/imperial_eagle/"),
("keelback", "3,126,150", "https://coriolis.io/outfit/keelback/"),
("orca", "48,539,890", "https://coriolis.io/outfit/orca/"),
("python", "56,978,180", "https://coriolis.io/outfit/python/"),
("sidewinder", "40,800", "https://coriolis.io/outfit/sidewinder/"),
("type-6", "1,045,950", "https://coriolis.io/outfit/type_6_transporter/"),
("type 6", "1,045,950", "https://coriolis.io/outfit/type_6_transporter/"),
("type6", "1,045,950", "https://coriolis.io/outfit/type_6_transporter/"),
("type-7", "17,472,250", "https://coriolis.io/outfit/type_7_transport/"),
("type 7", "17,472,250", "https://coriolis.io/outfit/type_7_transport/"),
("type7", "17,472,250", "https://coriolis.io/outfit/type_7_transport/"),
("type-9", "76,555,840", "https://coriolis.io/outfit/type_9_heavy/"),
("type 9", "76,555,840", "https://coriolis.io/outfit/type_9_heavy/"),
("type9", "76,555,840", "https://coriolis.io/outfit/type_9_heavy/"),
("cow", "76,555,840", "https://coriolis.io/outfit/type_9_heavy/"),
("viper", "142,930", "https://coriolis.io/outfit/viper/"),
("viper mk iii", "142,930", "https://coriolis.io/outfit/viper/"),
("viper mk iv", "437,930", "https://coriolis.io/outfit/viper_mk_iv/"),
("vulture", "4,925,620", "https://coriolis.io/outfit/vulture/")]
for x, y, z in corio:
if shipname == x:
await self.bot.say("The " + x + " costs " + y + "cr. \n" + z)
break
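# --- Editor's note (refactoring sketch, not part of the original cog) ---
# The linear scan over corio could be a single dict lookup, which also makes
# unknown ship names explicit:
#
# SHIPS = {name: (price, url) for name, price, url in corio}
# entry = SHIPS.get(shipname)
# if entry is None:
#     await self.bot.say("Unknown ship: " + shipname)
# else:
#     price, url = entry
#     await self.bot.say("The " + shipname + " costs " + price + "cr. \n" + url)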
def setup(bot):
bot.add_cog(Elite(bot))
|
b0r3d0/kitty
|
cogs/elite.py
|
Python
|
gpl-3.0
| 8,850
|
[
"ORCA"
] |
a93e7a42ee3e9252c4265f460ec584168312d6325df82e16069c9168f9928908
|
#!/usr/bin/env python
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
## from distutils.core import setup
setup(name='topoflow',
version='3.4',
description='d8-based, spatial hydrologic model',
author='Scott D. Peckham',
author_email='Scott.Peckham@colorado.edu',
license='MIT',
url='http://csdms.colorado.edu/wiki/Model:TopoFlow',
packages=['topoflow',
'topoflow.components',
'topoflow.components.tests',
'topoflow.examples',
'topoflow.framework', # (later in REQUIRES)
'topoflow.framework.tests',
'topoflow.gui', # (11/8/13)
'topoflow.utils', # (later in REQUIRES)
'topoflow.utils.tests'],
install_requires=['numpy', 'scipy', 'h5py', 'netCDF4'],
entry_points={
'console_scripts': [
'topoflow = topoflow.components.topoflow:main',
]
},
#-------------------------------------------------------
# There is debate online about the pros and cons of
# using "install_requires" since it can interfere with
# a user's existing installed packages.
#-------------------------------------------------------
# PyNIO allows reading and writing of NetCDF files.
# scimath.units (from Canopy) allows unit conversion.
#-------------------------------------------------------
# Right now, the topoflow package includes subpackages
# called "utils" and "framework" that may later be
# distributed as separate Python packages.
#-------------------------------------------------------
# install_requires=["numpy","scimath","PyNIO"],
# provides=[],
# obsoletes=[]
# package_data={'':['']},
# data_files=[], # (include topoflow.examples here instead?)
# test_suite='topoflow.tests',
)
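# --- Editor's note (standard setuptools workflow, nothing project-specific):
# installing the package and invoking the console script declared in
# entry_points above looks like
#
#   pip install .    # or: python setup.py install
#   topoflow         # dispatches to topoflow.components.topoflow:main
#
# pip resolves the install_requires list automatically at install time.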
|
mdpiper/topoflow
|
setup.py
|
Python
|
mit
| 1,969
|
[
"NetCDF"
] |
2ae82b4b8f0ff883c5091e99766c084ca21929f6b022223c3a0bf7fff0cb0dd6
|
__source__ = 'https://leetcode.com/problems/letter-case-permutation/'
# Time: O(2^{N} * N)
# Space: O(2^{N} * N)
#
# Description: Leetcode # 784. Letter Case Permutation
#
# Given a string S, we can transform every letter individually to be lowercase or uppercase
# to create another string. Return a list of all possible strings we could create.
#
# Examples:
# Input: S = "a1b2"
# Output: ["a1b2", "a1B2", "A1b2", "A1B2"]
#
# Input: S = "3z4"
# Output: ["3z4", "3Z4"]
#
# Input: S = "12345"
# Output: ["12345"]
# Note:
#
# S will be a string with length between 1 and 12.
# S will consist only of letters or digits.
import unittest
import itertools
# Time and Space Complexity: O(2^{N} * N)
# 104ms 22.31%
class Solution(object):
def letterCasePermutation(self, S):
"""
:type S: str
:rtype: List[str]
"""
f = lambda x: (x.lower(), x.upper()) if x.isalpha() else x
return map("".join, itertools.product(*map(f, S)))
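# Worked trace of the one-liner above for S = "a1b2" (editor's illustration):
#   map(f, S)                -> [('a', 'A'), '1', ('b', 'B'), '2']
#   itertools.product(*...)  -> ('a','1','b','2'), ('a','1','B','2'),
#                               ('A','1','b','2'), ('A','1','B','2')
#   "".join of each tuple    -> ["a1b2", "a1B2", "A1b2", "A1B2"]
# A digit such as '1' passes through unchanged: f returns it as-is, and a
# 1-character string is itself a length-1 iterable for product().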
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/letter-case-permutation/solution/
# 5ms 96.17%
class Solution {
public List<String> letterCasePermutation(String S) {
List<String> res = new ArrayList<>();
char[] chr = S.toCharArray();
Visit(res, chr, 0);
return res;
}
private void Visit(List<String> res, char[] chr, int start) {
res.add(new String(chr));
for (int i = start; i < chr.length; i++) {
char c = chr[i];
if (c <= 'z' && c >= 'a') {
chr[i] = (char)(c - 'a' + 'A');
Visit(res, chr, i + 1);
chr[i] = c;
}else if (c >= 'A' && c <= 'Z') {
chr[i] = (char)(c - 'A' + 'a');
Visit(res, chr, i + 1);
chr[i] = c;
}
}
}
}
Approach #1: Recursion [Accepted]
# 8ms 64.84%
class Solution {
public List<String> letterCasePermutation(String S) {
List<StringBuilder> res = new ArrayList<>();
res.add(new StringBuilder());
for (char c: S.toCharArray()) {
int n = res.size();
if (Character.isLetter(c)) {
for (int i = 0; i < n; i++) {
res.add(new StringBuilder(res.get(i)));
res.get(i).append(Character.toLowerCase(c));
res.get(n + i).append(Character.toUpperCase(c));
}
} else {
for (int i = 0; i < n; i++) {
res.get(i).append(c);
}
}
}
List<String> ans = new ArrayList<>();
for (StringBuilder sb : res) {
ans.add(sb.toString());
}
return ans;
}
}
Approach #2: Binary Mask [Accepted]
# 21ms 12.14%
class Solution {
public List<String> letterCasePermutation(String S) {
int B = 0;
for (char c: S.toCharArray()) {
if (Character.isLetter(c)) B++;
}
List<String> res = new ArrayList<>();
for (int bits = 0; bits < 1<<B; bits++) {
int b = 0;
StringBuilder sb = new StringBuilder();
for (char c : S.toCharArray()) {
if (Character.isLetter(c)) {
if (((bits >> b++) & 1 ) == 1) {
sb.append(Character.toLowerCase(c));
} else {
sb.append(Character.toUpperCase(c));
}
} else {
sb.append(c);
}
}
res.add(sb.toString());
}
return res;
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/LetterCasePermutation.py
|
Python
|
apache-2.0
| 3,758
|
[
"VisIt"
] |
849ecb7e088dff5ad4b474f6fa286161887201a0f6de1395bf7f996ae609c53e
|
"""Convert three terms recurrence coefficients into quadrature rules."""
import numpy
import scipy.linalg
def coefficients_to_quadrature(coeffs):
"""
Construct Gaussian quadrature abscissas and weights from three terms
recurrence coefficients.
Examples:
>>> distribution = chaospy.Normal(0, 1)
>>> coeffs, = chaospy.construct_recurrence_coefficients(4, distribution)
>>> coeffs
array([[0., 0., 0., 0., 0.],
[1., 1., 2., 3., 4.]])
>>> (abscissas,), (weights,) = chaospy.coefficients_to_quadrature(coeffs)
>>> abscissas.round(4)
array([-2.857 , -1.3556, -0. , 1.3556, 2.857 ])
>>> weights.round(4)
array([0.0113, 0.2221, 0.5333, 0.2221, 0.0113])
"""
coeffs = numpy.asfarray(coeffs)
if len(coeffs.shape) == 2:
coeffs = coeffs.reshape(1, 2, -1)
    assert len(coeffs.shape) == 3, "shape %s not allowed" % (coeffs.shape,)
assert coeffs.shape[-1] >= 1
abscissas = []
weights = []
for coeff in coeffs:
if numpy.any(coeff[1] < 0) or numpy.any(numpy.isnan(coeff)):
raise numpy.linalg.LinAlgError(
"Invalid recurrence coefficients can not be used for "
"constructing Gaussian quadrature rule")
order = len(coeff[0])
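        # Golub-Welsch: build the symmetric tridiagonal Jacobi matrix in
        # banded storage; its eigenvalues are the abscissas, and the squared
        # first components of the normalized eigenvectors give the weights.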
bands = numpy.zeros((2, order))
bands[0, :] = coeff[0, :order]
bands[1, :-1] = numpy.sqrt(coeff[1, 1:order])
vals, vecs = scipy.linalg.eig_banded(bands, lower=True)
abscissa, weight = vals.real, vecs[0, :]**2
indices = numpy.argsort(abscissa)
abscissa, weight = abscissa[indices], weight[indices]
abscissas.append(abscissa)
weights.append(weight)
return abscissas, weights
|
jonathf/chaospy
|
chaospy/recurrence/jacobi.py
|
Python
|
mit
| 1,764
|
[
"Gaussian"
] |
ea3f72f21e46d69d08c745843b547f039270cb2395763bad499268e9007e0cd0
|
import pdb
import numpy as np
import cPickle as pickle
from utils import black
from utils import loggen
#from astropy.cosmology import FlatLambdaCDM
from astropy.cosmology import Planck15 as cosmo
import astropy.units as u
from lmfit import Parameters, minimize, fit_report
L_sun = 3.839e26 # W
c = 299792458.0 # m/s
def find_nearest_index(array_in,value):
ng = len(value)
#idx = (np.abs(array_in-value)).argmin()
idx = (np.abs(array_in-np.reshape(value,(ng,1)))).argmin(axis=1)
return idx
def sed(p, nu_in, T, betain, alphain):
'''
#m = [A, T, Beta, Alpha] - return SED (array) in Jy
#P[0] = A
#P[1] = T
#P[2] = Beta
#P[3] = Alpha
'''
v = p.valuesdict()
A0= v['Ain']
A=np.asarray(A0)
#T = v['Tin']
#betain = v['betain']
#alphain = v['alphain']
ng = np.size(A)
ns = len(nu_in)
base = 2.0 * (6.626)**(-2.0 - betain - alphain) * (1.38)**(3. + betain + alphain) / (2.99792458)**2.0
expo = 34.0 * (2.0 + betain + alphain) - 23.0 * (3.0 + betain + alphain) - 16.0 + 26.0
K = base * 10.0**expo
w_num = A * K * (T * (3.0 + betain + alphain))**(3.0 + betain + alphain)
w_den = (np.exp(3.0 + betain + alphain) - 1.0)
w_div = w_num/w_den
nu_cut = (3.0 + betain + alphain) * 0.208367e11 * T
graybody = np.reshape(A,(ng,1)) * nu_in**np.reshape(betain,(ng,1)) * black(nu_in, T) / 1000.0
powerlaw = np.reshape(w_div,(ng,1)) * nu_in**np.reshape(-1.0 * alphain,(ng,1))
graybody[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]=powerlaw[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]
return graybody
def sed_direct(A, nu_in, T, betain, alphain):
'''
'''
ng = np.size(A)
ns = len(nu_in)
base = 2.0 * (6.626)**(-2.0 - betain - alphain) * (1.38)**(3. + betain + alphain) / (2.99792458)**2.0
expo = 34.0 * (2.0 + betain + alphain) - 23.0 * (3.0 + betain + alphain) - 16.0 + 26.0
K = base * 10.0**expo
w_num = A * K * (T * (3.0 + betain + alphain))**(3.0 + betain + alphain)
w_den = (np.exp(3.0 + betain + alphain) - 1.0)
w_div = w_num/w_den
nu_cut = (3.0 + betain + alphain) * 0.208367e11 * T
#graybody = np.reshape(A,(ng,1)) * nu_in**np.reshape(np.repeat(betain,ng),[ng,1]) * black(nu_in, T) / 1000.0
#powerlaw = np.reshape(w_div,(ng,1)) * nu_in**np.reshape(np.repeat(alphain,ng),[ng,1])
graybody = np.reshape(A,(ng,1)) * nu_in**betain * black(nu_in, T) / 1000.0
powerlaw = np.reshape(w_div,(ng,1)) * nu_in**(-1.0 * alphain)
graybody[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]=powerlaw[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]
return graybody
def sedint(p, nu_in, Lir, T, betain, alphain):
'''
#m = [A, T, Beta, Alpha] - return integrated SED flux (one number) in Jy x Hz
#P[0] = A
#P[1] = T
#P[2] = Beta
#P[3] = Alpha
'''
v = p.valuesdict()
A0 = v['Ain']
A=np.asarray(A0)
#pdb.set_trace()
#T = v['Tin']
#betain = v['betain']
#alphain = v['alphain']
#print 'A is ' + str(A)
ns = len(nu_in)
#pdb.set_trace()
ng = np.size(A)
base = 2.0 * (6.626)**(-2.0 - betain - alphain) * (1.38)**(3. + betain + alphain) / (2.99792458)**2.0
expo = 34.0 * (2.0 + betain + alphain) - 23.0 * (3.0 + betain + alphain) - 16.0 + 26.0
K = base * 10.0**expo
w_num = A * K * (T * (3.0 + betain + alphain))**(3.0 + betain + alphain)
w_den = (np.exp(3.0 + betain + alphain) - 1.0)
w_div = w_num/w_den
nu_cut = (3.0 + betain + alphain) * 0.208367e11 * T
#nu_cut_ind = find_nearest_index(nu_in,nu_cut)
graybody = np.reshape(A,(ng,1)) * nu_in**np.reshape(betain,(ng,1)) * black(nu_in, T) / 1000.0
powerlaw = np.reshape(w_div,(ng,1)) * nu_in**np.reshape(-1.0 * alphain,(ng,1))
graybody[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]=powerlaw[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]
#pdb.set_trace()
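    #rectangle-rule integration over the frequency grid; the residual
    #handed back to lmfit is (integrated model SED) - Lir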
dnu = nu_in[1:ns] - nu_in[0:ns-1]
dnu = np.append(dnu[0],dnu)
return np.ravel([np.sum(graybody * dnu, axis=1) - Lir])
def sedint2(p, nu_in, Lir, ng): # m = [A, T, Beta, Alpha] - return integrated SED flux (one number) in Jy x Hz
#P[0] = A
#P[1] = T
#P[2] = Beta
#P[3] = Alpha
v = p.valuesdict()
A = v['Ain']
T = v['Tin']
betain = v['betain']
alphain = v['alphain']
    print('A is ' + str(A))
ns = len(nu_in)
#ng = len(A)
base = 2.0 * (6.626)**(-2.0 - betain - alphain) * (1.38)**(3. + betain + alphain) / (2.99792458)**2.0
expo = 34.0 * (2.0 + betain + alphain) - 23.0 * (3.0 + betain + alphain) - 16.0 + 26.0
K = base * 10.0**expo
w_num = A * K * (T * (3.0 + betain + alphain))**(3.0 + betain + alphain)
w_den = (np.exp(3.0 + betain + alphain) - 1.0)
w_div = w_num/w_den
nu_cut = (3.0 + betain + alphain) * 0.208367e11 * T
#nu_cut_ind = find_nearest_index(nu_in,nu_cut)
graybody = np.reshape(A,(ng,1)) * nu_in**np.reshape(betain,(ng,1)) * black(nu_in, T) / 1000.0
powerlaw = np.reshape(w_div,(ng,1)) * nu_in**np.reshape(-1.0 * alphain,(ng,1))
graybody[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]=powerlaw[np.where(nu_in >= np.reshape(nu_cut,(ng,1)))]
#pdb.set_trace()
dnu = nu_in[1:ns] - nu_in[0:ns-1]
dnu = np.append(dnu[0],dnu)
return np.ravel([np.sum(graybody * dnu, axis=1) - Lir])
def simple_flux_from_greybody(lambdavector, Trf = None, b = None, Lrf = None, zin = None, ngal = None):
'''
Return flux densities at any wavelength of interest (in the range 1-10000 micron),
assuming a galaxy (at given redshift) graybody spectral energy distribution (SED),
with a power law replacing the Wien part of the spectrum to account for the
variability of dust temperatures within the galaxy. The two different functional
forms are stitched together by imposing that the two functions and their first
derivatives coincide. The code contains the nitty-gritty details explicitly.
Inputs:
alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003]
betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985]
Trf = rest-frame temperature [in K; default = 20K]
Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
zin = galaxy redshift [default = 0.001]
lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)];
AUTHOR:
Lorenzo Moncelsi [moncelsi@caltech.edu]
HISTORY:
20June2012: created in IDL
November2015: converted to Python
'''
nwv = len(lambdavector)
nuvector = c * 1.e6 / lambdavector # Hz
nsed = 1e4
lambda_mod = loggen(1e3, 8.0, nsed) # microns
nu_mod = c * 1.e6/lambda_mod # Hz
#Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)
#cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2 units are L_sun/(Jy x Hz)
Lir = Lrf / conversion # Jy x Hz
Ain = np.zeros(ngal) + 1.0e-36 #good starting parameter
betain = np.zeros(ngal) + b
alphain= np.zeros(ngal) + 2.0
fit_params = Parameters()
fit_params.add('Ain', value= Ain)
#fit_params.add('Tin', value= Trf/(1.+zin), vary = False)
#fit_params.add('betain', value= b, vary = False)
#fit_params.add('alphain', value= alphain, vary = False)
#pdb.set_trace()
#THE LM FIT IS HERE
#Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal))
    Pfin = minimize(sedint, fit_params, args=(nu_mod, Lir.value, Trf/(1.+zin), betain, alphain))
#pdb.set_trace()
    flux_mJy = sed(Pfin.params, nuvector, Trf/(1.+zin), betain, alphain)
return flux_mJy
def single_simple_flux_from_greybody(lambdavector, Trf = None, b = 2.0, Lrf = None, zin = None):
'''
Return flux densities at any wavelength of interest (in the range 1-10000 micron),
assuming a galaxy (at given redshift) graybody spectral energy distribution (SED),
with a power law replacing the Wien part of the spectrum to account for the
variability of dust temperatures within the galaxy. The two different functional
forms are stitched together by imposing that the two functions and their first
derivatives coincide. The code contains the nitty-gritty details explicitly.
Cosmology assumed: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)
Inputs:
alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003]
betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985]
Trf = rest-frame temperature [in K; default = 20K]
Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
zin = galaxy redshift [default = 0.001]
lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)];
AUTHOR:
Lorenzo Moncelsi [moncelsi@caltech.edu]
HISTORY:
20June2012: created in IDL
November2015: converted to Python
'''
nwv = len(lambdavector)
nuvector = c * 1.e6 / lambdavector # Hz
nsed = 1e4
lambda_mod = loggen(1e3, 8.0, nsed) # microns
nu_mod = c * 1.e6/lambda_mod # Hz
#cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2 units are L_sun/(Jy x Hz)
Lir = Lrf / conversion # Jy x Hz
Ain = 1.0e-36 #good starting parameter
betain = b
alphain= 2.0
fit_params = Parameters()
fit_params.add('Ain', value= Ain)
#THE LM FIT IS HERE
Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,Trf/(1.+zin),b,alphain))
flux_mJy=sed(Pfin.params,nuvector,Trf/(1.+zin),b,alphain)
return flux_mJy
def single_simple_rest_frame_flux_from_greybody(lambdavector, Trf = None, b = 2.0, Lrf = None, zin = None):
'''
Return flux densities at the rest-frame wavelength of interest (in the range 1-10000 micron),
assuming a galaxy (at given redshift) graybody spectral energy distribution (SED),
with a power law replacing the Wien part of the spectrum to account for the
variability of dust temperatures within the galaxy. The two different functional
forms are stitched together by imposing that the two functions and their first
derivatives coincide. The code contains the nitty-gritty details explicitly.
Cosmology assumed: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009)
Inputs:
alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003]
betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985]
Trf = rest-frame temperature [in K; default = 20K]
Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
zin = galaxy redshift [default = 0.001]
lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)];
AUTHOR:
Lorenzo Moncelsi [moncelsi@caltech.edu]
HISTORY:
20June2012: created in IDL
November2015: converted to Python
'''
nwv = len(lambdavector)
nuvector = (c * 1.e6 / lambdavector) / (1.+zin) # Hz
nsed = 1e4
lambda_mod = loggen(1e3, 8.0, nsed) # microns
nu_mod = c * 1.e6/lambda_mod # Hz
#cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2 units are L_sun/(Jy x Hz)
Lir = Lrf / conversion # Jy x Hz
Ain = 1.0e-36 #good starting parameter
betain = b
alphain= 2.0
fit_params = Parameters()
fit_params.add('Ain', value= Ain)
#THE LM FIT IS HERE
Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,Trf/(1.+zin),b,alphain))
flux_mJy=sed(Pfin.params,nuvector,Trf/(1.+zin),b,alphain)
return flux_mJy
def amplitude_of_best_fit_greybody(Trf = None, b = 2.0, Lrf = None, zin = None):
'''
Same as single_simple_flux_from_greybody, but to made an amplitude lookup table
'''
nsed = 1e4
lambda_mod = loggen(1e3, 8.0, nsed) # microns
nu_mod = c * 1.e6/lambda_mod # Hz
#cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2 units are L_sun/(Jy x Hz)
Lir = Lrf / conversion # Jy x Hz
Ain = 1.0e-36 #good starting parameter
betain = b
alphain= 2.0
fit_params = Parameters()
fit_params.add('Ain', value= Ain)
#THE LM FIT IS HERE
Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,Trf/(1.+zin),b,alphain))
#pdb.set_trace()
return Pfin.params['Ain'].value
def invert_sed_neural_net(lam, Trf, Lrf, zin, wpath = '/data/pickles/simstack/ann_function_fits/', wfile = 'SED_amplitude_weights_from_neural_network_logistic_100layers_N8000.p'):
reg = pickle.load( open( wpath + wfile, "rb" ) )
nuvector = c * 1.e6 / lam
rearrange_x = np.transpose(np.array([Trf, Lrf, zin]))
predicted_amplitude = 1e-40 * 10**reg.predict(rearrange_x)
fluxes = sed_direct(predicted_amplitude, np.array([nuvector]), Trf/(1.+zin), betain=2.0, alphain=2.0)
return fluxes
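#Usage sketch (assumes the local `utils` module providing black() and
#loggen() is importable; values below are illustrative only):
# lam = np.array([250., 350., 500.]) #observed wavelengths in microns
# flux = single_simple_flux_from_greybody(lam, Trf=25.0, b=2.0, Lrf=1e12, zin=2.0)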
|
marcoviero/Utils
|
invert_sed.py
|
Python
|
mit
| 12,970
|
[
"Galaxy"
] |
a1b965e2ecb9359b2e549e729a15b1da723481a6e2c634f53dafea40f11809f7
|
#=======================================================================
# debug_utils.py
#=======================================================================
# Collection of utility functions used for debugging models.
from __future__ import print_function
from ...model import Model as model
import sys
import ast, _ast
import functools
# module-level print helpers; port_walk() and print_connections() rebuild
# these with a custom output stream, while print_ast() and PrintVisitor
# rely on these globals
printn = functools.partial( print )
printl = functools.partial( print, end='' )
def port_walk(tgt, spaces=0, o=sys.stdout):
printn = functools.partial( print, file=o )
printl = functools.partial( print, file=o, end='' )
pw = tgt._ports + tgt._wires
for x in pw:
printn( spaces*' ', x.parent.name, x.name, x )
for y in x.connections:
fullname = y.name
if y.parent:
fullname = y.parent.name+'.'+fullname
printn( spaces*' ', ' knctn: {0} {1}'.format(type(y), fullname) )
    printl( spaces*' ', ' value:', x._value )  #x.value
if isinstance(x._value, model.Slice):
# TODO: handle this case in VerilogSlice instead?
if x._value._value:
printn( x.value )
else:
printn( None )
printn((spaces+1)*' ', ' slice:', x._value._value, bin(x._value.pmask))
# TODO: handle this case in VerilogSlice instead?
else:
printn( x.value )
printn()
for x in tgt._submodules:
printn( spaces*' ', x.name )
port_walk(x, spaces+3, o)
def print_connections(tgt, spaces=0, o=sys.stdout):
  printn = functools.partial( print, file=o )
  pw = tgt._ports + tgt._wires
for x in pw:
printn( spaces*' ', x.parent.name, x.name, x )
for y in x.node.connections:
fullname = y.name
if y.parent:
fullname = y.parent.name+'.'+fullname
printn( spaces*' ', ' knctn: {0} {1}'.format(type(y), fullname) )
printn( spaces*' ', ' value:', x.value )
if isinstance(x.node, model.Slice):
# # TODO: handle this case in VerilogSlice instead?
# printn( x.value )
      printn( (spaces+1)*' ', ' slice:', x.value )  #, bin(x._value.pmask)
## TODO: handle this case in VerilogSlice instead?
#else:
# printn( x.value )
printn()
for x in tgt._submodules:
printn( spaces*' ', x.name )
port_walk(x, spaces+3, o)
port_walk = print_connections
def print_ast(ast_tree):
"""Debug utility which prints the provided AST tree."""
printn( "="*35, "BEGIN AST", "="*35 )
PrintVisitor().visit( ast_tree )
printn( "="*35, " END AST ", "="*35 )
class PrintVisitor(ast.NodeVisitor):
"""AST Visitor class used by print_ast()."""
def __init__(self):
self.indent = 0
def generic_visit(self, node):
#off = "??"
#if hasattr(node, 'col_offset'):
# off = node.col_offset
#printl( "{0:2}".format(off) )
if isinstance(node, _ast.FunctionDef):
printl( "FUNCTIONDEF:" )
elif isinstance(node, _ast.arguments):
printl( "ARGUMENTS: " )
elif isinstance(node, _ast.Assign):
printl( "ASSIGN: " )
elif isinstance(node, _ast.If):
printl( "IFELSE: {0:4}".format(node.lineno) )
else:
printl( " " )
printl( self.indent*' ', node )
if isinstance(node, _ast.Module):
print( node.body )
elif isinstance(node, _ast.FunctionDef):
print( node.name, "FIXME" )
#print( node.name, [('@'+x.id, x) for x in node.decorator_list] )
elif isinstance(node, _ast.Name):
print( node.id, "-----", type( node.ctx ) )
elif isinstance(node, _ast.Attribute):
print( node.attr, "-----", type( node.ctx ) )
elif isinstance(node, _ast.Assign):
print( node.targets, ' = ', node.value )
elif isinstance(node, _ast.AugAssign):
print( node.op )
elif isinstance(node, _ast.Call):
print( node.func )
elif isinstance(node, _ast.arguments):
print( node.args )
elif isinstance(node, _ast.If):
print( node.test, node.body, node.orelse )
elif isinstance(node, _ast.Subscript):
#print( node.value )
#print( type(node.slice)#, '=>', type(node.slice.value) )
print( type(node.slice), "-----", type( node.ctx ) )
else:
printl( node._attributes )
      print()
self.indent += 3
for item in ast.iter_child_nodes(node):
self.visit(item)
#for field, value in ast.iter_fields(node):
# if isinstance(value, list):
# for item in value:
# if isinstance(item, ast.AST):
# self.visit(item)
# elif isinstance(value, ast.AST):
# self.visit(value)
self.indent -= 3
|
Glyfina-Fernando/pymtl
|
pymtl/tools/deprecated/debug_utils.py
|
Python
|
bsd-3-clause
| 4,345
|
[
"VisIt"
] |
886890c541c35ebe1d9d900e6f55514c55a32a372d1ca51dce9f26b42b694327
|
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from . import (AudioFile, MetaData, InvalidFile, Image,
               WaveContainer, AiffContainer, VERSION)
from .vorbiscomment import VorbisComment
from .id3 import skip_id3v2_comment
#######################
#FLAC
#######################
class InvalidFLAC(InvalidFile):
pass
class FlacMetaDataBlockTooLarge(Exception):
"""raised if one attempts to build a FlacMetaDataBlock too large"""
pass
class FlacMetaData(MetaData):
"""a class for managing a native FLAC's metadata"""
def __init__(self, blocks):
self.__dict__["block_list"] = list(blocks)
def has_block(self, block_id):
"""returns True if the given block ID is present"""
return block_id in [b.BLOCK_ID for b in self.block_list]
def add_block(self, block):
"""adds the given block to our list of blocks"""
#the specification only requires that STREAMINFO be first
#the rest are largely arbitrary,
#though I like to keep PADDING as the last block for aesthetic reasons
PREFERRED_ORDER = [Flac_STREAMINFO.BLOCK_ID,
Flac_SEEKTABLE.BLOCK_ID,
Flac_CUESHEET.BLOCK_ID,
Flac_VORBISCOMMENT.BLOCK_ID,
Flac_PICTURE.BLOCK_ID,
Flac_APPLICATION.BLOCK_ID,
Flac_PADDING.BLOCK_ID]
stop_blocks = set(
PREFERRED_ORDER[PREFERRED_ORDER.index(block.BLOCK_ID) + 1:])
for (index, old_block) in enumerate(self.block_list):
if (old_block.BLOCK_ID in stop_blocks):
self.block_list.insert(index, block)
break
else:
self.block_list.append(block)
def get_block(self, block_id):
"""returns the first instance of the given block_id
may raise IndexError if the block is not in our list of blocks"""
for block in self.block_list:
if (block.BLOCK_ID == block_id):
return block
else:
raise IndexError()
def get_blocks(self, block_id):
"""returns all instances of the given block_id in our list of blocks"""
return [b for b in self.block_list if (b.BLOCK_ID == block_id)]
def replace_blocks(self, block_id, blocks):
"""replaces all instances of the given block_id with
blocks taken from the given list
if insufficient matching blocks are present,
this uses add_block() to populate the remainder
if additional matching blocks are present,
they are removed
"""
new_blocks = []
for block in self.block_list:
if (block.BLOCK_ID == block_id):
if (len(blocks) > 0):
new_blocks.append(blocks.pop(0))
else:
pass
else:
new_blocks.append(block)
self.block_list = new_blocks
while (len(blocks) > 0):
self.add_block(blocks.pop(0))
def __setattr__(self, key, value):
if (key in self.FIELDS):
try:
vorbis_comment = self.get_block(Flac_VORBISCOMMENT.BLOCK_ID)
except IndexError:
#add VORBIS comment block if necessary
vorbis_comment = Flac_VORBISCOMMENT(
[], u"Python Audio Tools %s" % (VERSION))
self.add_block(vorbis_comment)
setattr(vorbis_comment, key, value)
else:
self.__dict__[key] = value
def __getattr__(self, key):
if (key in self.FIELDS):
try:
return getattr(self.get_block(Flac_VORBISCOMMENT.BLOCK_ID),
key)
except IndexError:
#no VORBIS comment block, so all values are None
return None
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(key)
def __delattr__(self, key):
if (key in self.FIELDS):
try:
delattr(self.get_block(Flac_VORBISCOMMENT.BLOCK_ID), key)
except IndexError:
#no VORBIS comment block, so nothing to delete
pass
else:
try:
del(self.__dict__[key])
except KeyError:
raise AttributeError(key)
@classmethod
def converted(cls, metadata):
"""takes a MetaData object and returns a FlacMetaData object"""
if (metadata is None):
return None
elif (isinstance(metadata, FlacMetaData)):
return cls([block.copy() for block in metadata.block_list])
else:
return cls([Flac_VORBISCOMMENT.converted(metadata)] +
[Flac_PICTURE.converted(image)
for image in metadata.images()] +
[Flac_PADDING(4096)])
def add_image(self, image):
"""embeds an Image object in this metadata"""
self.add_block(Flac_PICTURE.converted(image))
def delete_image(self, image):
"""deletes an image object from this metadata"""
self.block_list = [b for b in self.block_list
if not ((b.BLOCK_ID == Flac_PICTURE.BLOCK_ID) and
(b == image))]
def images(self):
"""returns a list of embedded Image objects"""
return self.get_blocks(Flac_PICTURE.BLOCK_ID)
@classmethod
def supports_images(cls):
"""returns True"""
return True
def clean(self, fixes_performed):
"""returns a new FlacMetaData object that's been cleaned of problems
any fixes performed are appended to fixes_performed as unicode"""
from .text import (CLEAN_FLAC_REORDERED_STREAMINFO,
CLEAN_FLAC_MULITPLE_STREAMINFO,
CLEAN_FLAC_MULTIPLE_VORBISCOMMENT,
CLEAN_FLAC_MULTIPLE_SEEKTABLE,
CLEAN_FLAC_MULTIPLE_CUESHEET,
CLEAN_FLAC_UNDEFINED_BLOCK)
cleaned_blocks = []
for block in self.block_list:
if (block.BLOCK_ID == Flac_STREAMINFO.BLOCK_ID):
#reorder STREAMINFO block to be first, if necessary
if (len(cleaned_blocks) == 0):
cleaned_blocks.append(block)
elif (cleaned_blocks[0].BLOCK_ID != block.BLOCK_ID):
fixes_performed.append(
CLEAN_FLAC_REORDERED_STREAMINFO)
cleaned_blocks.insert(0, block)
else:
fixes_performed.append(
CLEAN_FLAC_MULITPLE_STREAMINFO)
elif (block.BLOCK_ID == Flac_VORBISCOMMENT.BLOCK_ID):
if (block.BLOCK_ID in [b.BLOCK_ID for b in cleaned_blocks]):
#remove redundant VORBIS_COMMENT blocks
fixes_performed.append(
CLEAN_FLAC_MULTIPLE_VORBISCOMMENT)
else:
#recursively clean up the text fields in FlacVorbisComment
cleaned_blocks.append(block.clean(fixes_performed))
elif (block.BLOCK_ID == Flac_PICTURE.BLOCK_ID):
#recursively clean up any image blocks
cleaned_blocks.append(block.clean(fixes_performed))
elif (block.BLOCK_ID == Flac_APPLICATION.BLOCK_ID):
cleaned_blocks.append(block)
elif (block.BLOCK_ID == Flac_SEEKTABLE.BLOCK_ID):
#remove redundant seektable, if necessary
if (block.BLOCK_ID in [b.BLOCK_ID for b in cleaned_blocks]):
fixes_performed.append(
CLEAN_FLAC_MULTIPLE_SEEKTABLE)
else:
cleaned_blocks.append(block.clean(fixes_performed))
elif (block.BLOCK_ID == Flac_CUESHEET.BLOCK_ID):
#remove redundant cuesheet, if necessary
if (block.BLOCK_ID in [b.BLOCK_ID for b in cleaned_blocks]):
fixes_performed.append(
CLEAN_FLAC_MULTIPLE_CUESHEET)
else:
cleaned_blocks.append(block)
elif (block.BLOCK_ID == Flac_PADDING.BLOCK_ID):
cleaned_blocks.append(block)
else:
#remove undefined blocks
fixes_performed.append(CLEAN_FLAC_UNDEFINED_BLOCK)
return self.__class__(cleaned_blocks)
def __repr__(self):
return "FlacMetaData(%s)" % (self.block_list)
@classmethod
def parse(cls, reader):
"""returns a FlacMetaData object from the given BitstreamReader
which has already parsed the 4-byte 'fLaC' file ID"""
block_list = []
last = 0
while (last != 1):
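            #each block header is a 1 bit "last block" flag,
            #a 7 bit block type and a 24 bit block length in bytes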
(last, block_type, block_length) = reader.parse("1u7u24u")
if (block_type == 0): # STREAMINFO
block_list.append(
Flac_STREAMINFO.parse(reader.substream(block_length)))
elif (block_type == 1): # PADDING
block_list.append(
Flac_PADDING.parse(
reader.substream(block_length), block_length))
elif (block_type == 2): # APPLICATION
block_list.append(
Flac_APPLICATION.parse(
reader.substream(block_length), block_length))
elif (block_type == 3): # SEEKTABLE
block_list.append(
Flac_SEEKTABLE.parse(
reader.substream(block_length), block_length / 18))
elif (block_type == 4): # VORBIS_COMMENT
block_list.append(
Flac_VORBISCOMMENT.parse(
reader.substream(block_length)))
elif (block_type == 5): # CUESHEET
block_list.append(
Flac_CUESHEET.parse(reader.substream(block_length)))
elif (block_type == 6): # PICTURE
block_list.append(
Flac_PICTURE.parse(reader.substream(block_length)))
elif ((block_type >= 7) and (block_type <= 126)):
from .text import ERR_FLAC_RESERVED_BLOCK
raise ValueError(ERR_FLAC_RESERVED_BLOCK % (block_type))
else:
from .text import ERR_FLAC_INVALID_BLOCK
raise ValueError(ERR_FLAC_INVALID_BLOCK)
return cls(block_list)
def raw_info(self):
"""returns human-readable metadata as a unicode string"""
from os import linesep
return linesep.decode('ascii').join(
["FLAC Tags:"] + [block.raw_info() for block in self.blocks()])
def blocks(self):
"""yields FlacMetaData's individual metadata blocks"""
for block in self.block_list:
yield block
def build(self, writer):
"""writes the FlacMetaData to the given BitstreamWriter
not including the 4-byte 'fLaC' file ID"""
from . import iter_last
for (last_block,
block) in iter_last(iter([b for b in self.blocks()
if (b.size() < (2 ** 24))])):
if (not last_block):
writer.build("1u7u24u", (0, block.BLOCK_ID, block.size()))
else:
writer.build("1u7u24u", (1, block.BLOCK_ID, block.size()))
block.build(writer)
def size(self):
"""returns the size of all metadata blocks
including the block headers
but not including the 4-byte 'fLaC' file ID"""
from operator import add
return reduce(add, [4 + b.size() for b in self.block_list], 0)
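#Usage sketch (assumes a valid FLAC file on disk):
#
#  from .bitstream import BitstreamReader
#  f = open("track.flac", "rb")
#  assert f.read(4) == 'fLaC'
#  print FlacMetaData.parse(BitstreamReader(f, 0)).raw_info()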
class Flac_STREAMINFO:
BLOCK_ID = 0
def __init__(self, minimum_block_size, maximum_block_size,
minimum_frame_size, maximum_frame_size,
sample_rate, channels, bits_per_sample,
total_samples, md5sum):
"""all values are non-negative integers except for md5sum
which is a 16-byte binary string"""
self.minimum_block_size = minimum_block_size
self.maximum_block_size = maximum_block_size
self.minimum_frame_size = minimum_frame_size
self.maximum_frame_size = maximum_frame_size
self.sample_rate = sample_rate
self.channels = channels
self.bits_per_sample = bits_per_sample
self.total_samples = total_samples
self.md5sum = md5sum
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_STREAMINFO(self.minimum_block_size,
self.maximum_block_size,
self.minimum_frame_size,
self.maximum_frame_size,
self.sample_rate,
self.channels,
self.bits_per_sample,
self.total_samples,
self.md5sum)
def __eq__(self, block):
for attr in ["minimum_block_size",
"maximum_block_size",
"minimum_frame_size",
"maximum_frame_size",
"sample_rate",
"channels",
"bits_per_sample",
"total_samples",
"md5sum"]:
if ((not hasattr(block, attr)) or (getattr(self, attr) !=
getattr(block, attr))):
return False
else:
return True
def __repr__(self):
return ("Flac_STREAMINFO(%s)" %
",".join(["%s=%s" % (key, repr(getattr(self, key)))
for key in ["minimum_block_size",
"maximum_block_size",
"minimum_frame_size",
"maximum_frame_size",
"sample_rate",
"channels",
"bits_per_sample",
"total_samples",
"md5sum"]]))
def raw_info(self):
"""returns a human-readable version of this metadata block
as unicode"""
from os import linesep
return linesep.decode('ascii').join(
[u" STREAMINFO:",
u" minimum block size = %d" % (self.minimum_block_size),
u" maximum block size = %d" % (self.maximum_block_size),
u" minimum frame size = %d" % (self.minimum_frame_size),
u" maximum frame size = %d" % (self.maximum_frame_size),
u" sample rate = %d" % (self.sample_rate),
u" channels = %d" % (self.channels),
u" bits-per-sample = %d" % (self.bits_per_sample),
u" total samples = %d" % (self.total_samples),
u" MD5 sum = %s" %
(u"".join(["%2.2X" % (ord(b)) for b in self.md5sum]))])
@classmethod
def parse(cls, reader):
"""returns this metadata block from a BitstreamReader"""
values = reader.parse("16u16u24u24u20u3u5u36U16b")
values[5] += 1 # channels
values[6] += 1 # bits-per-sample
return cls(*values)
def build(self, writer):
"""writes this metadata block to a BitstreamWriter"""
writer.build("16u16u24u24u20u3u5u36U16b",
(self.minimum_block_size,
self.maximum_block_size,
self.minimum_frame_size,
self.maximum_frame_size,
self.sample_rate,
self.channels - 1,
self.bits_per_sample - 1,
self.total_samples,
self.md5sum))
def size(self):
"""the size of this metadata block
not including the 4-byte block header"""
return 34
class Flac_VORBISCOMMENT(VorbisComment):
BLOCK_ID = 4
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_VORBISCOMMENT(self.comment_strings[:],
self.vendor_string)
def __repr__(self):
return "Flac_VORBISCOMMENT(%s, %s)" % \
(repr(self.comment_strings), repr(self.vendor_string))
def raw_info(self):
"""returns a human-readable version of this metadata block
as unicode"""
from os import linesep
from . import display_unicode
#align the text strings on the "=" sign, if any
        indents = [len(display_unicode(comment.split(u"=", 1)[0]))
                   for comment in self.comment_strings
                   if u"=" in comment]
        if (len(indents) > 0):
            max_indent = max(indents)
        else:
            max_indent = 0
comment_strings = []
for comment in self.comment_strings:
if (u"=" in comment):
comment_strings.append(
u" " * (4 + max_indent -
len(display_unicode(comment.split(u"=", 1)[0]))) +
comment)
else:
comment_strings.append(u" " * 4 + comment)
return linesep.decode('ascii').join(
[u" VORBIS_COMMENT:",
u" %s" % (self.vendor_string)] +
comment_strings)
@classmethod
def converted(cls, metadata):
"""converts a MetaData object to a Flac_VORBISCOMMENT object"""
if ((metadata is None) or (isinstance(metadata, Flac_VORBISCOMMENT))):
return metadata
else:
#make VorbisComment do all the work,
#then lift its data into a new Flac_VORBISCOMMENT
metadata = VorbisComment.converted(metadata)
return cls(metadata.comment_strings,
metadata.vendor_string)
@classmethod
def parse(cls, reader):
"""returns this metadata block from a BitstreamReader"""
reader.set_endianness(1)
vendor_string = reader.read_bytes(reader.read(32)).decode('utf-8',
'replace')
return cls([reader.read_bytes(reader.read(32)).decode('utf-8',
'replace')
for i in xrange(reader.read(32))], vendor_string)
def build(self, writer):
"""writes this metadata block to a BitstreamWriter"""
writer.set_endianness(1)
vendor_string = self.vendor_string.encode('utf-8')
writer.build("32u%db" % (len(vendor_string)),
(len(vendor_string), vendor_string))
writer.write(32, len(self.comment_strings))
for comment_string in self.comment_strings:
comment_string = comment_string.encode('utf-8')
writer.build("32u%db" % (len(comment_string)),
(len(comment_string), comment_string))
writer.set_endianness(0)
def size(self):
"""the size of this metadata block
not including the 4-byte block header"""
from operator import add
return (4 + len(self.vendor_string.encode('utf-8')) +
4 +
reduce(add, [4 + len(comment.encode('utf-8'))
for comment in self.comment_strings], 0))
class Flac_PICTURE(Image):
BLOCK_ID = 6
def __init__(self, picture_type, mime_type, description,
width, height, color_depth, color_count, data):
self.__dict__["data"] = data
self.__dict__["mime_type"] = mime_type
self.__dict__["width"] = width
self.__dict__["height"] = height
self.__dict__["color_depth"] = color_depth
self.__dict__["color_count"] = color_count
self.__dict__["description"] = description
self.__dict__["picture_type"] = picture_type
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_PICTURE(self.picture_type,
self.mime_type,
self.description,
self.width,
self.height,
self.color_depth,
self.color_count,
self.data)
def __getattr__(self, key):
if (key == "type"):
#convert FLAC picture_type to Image type
#
# | Item | FLAC Picture ID | Image type |
# |--------------+-----------------+------------|
# | Other | 0 | 4 |
# | Front Cover | 3 | 0 |
# | Back Cover | 4 | 1 |
# | Leaflet Page | 5 | 2 |
# | Media | 6 | 3 |
return {0: 4, 3: 0, 4: 1, 5: 2, 6: 3}.get(self.picture_type, 4)
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
if (key == "type"):
#convert Image type to FLAC picture_type
#
# | Item | Image type | FLAC Picture ID |
# |--------------+------------+-----------------|
# | Other | 4 | 0 |
# | Front Cover | 0 | 3 |
# | Back Cover | 1 | 4 |
# | Leaflet Page | 2 | 5 |
# | Media | 3 | 6 |
self.picture_type = {4: 0, 0: 3, 1: 4, 2: 5, 3: 6}.get(value, 0)
else:
self.__dict__[key] = value
def __repr__(self):
return ("Flac_PICTURE(%s)" %
",".join(["%s=%s" % (key, repr(getattr(self, key)))
for key in ["picture_type",
"mime_type",
"description",
"width",
"height",
"color_depth",
"color_count"]]))
def raw_info(self):
"""returns a human-readable version of this metadata block
as unicode"""
from os import linesep
return linesep.decode('ascii').join(
[u" PICTURE:",
u" picture type = %d" % (self.picture_type),
u" MIME type = %s" % (self.mime_type),
u" description = %s" % (self.description),
u" width = %d" % (self.width),
u" height = %d" % (self.height),
u" color depth = %d" % (self.color_depth),
u" color count = %d" % (self.color_count),
u" bytes = %d" % (len(self.data))])
@classmethod
def parse(cls, reader):
"""returns this metadata block from a BitstreamReader"""
return cls(
picture_type=reader.read(32),
mime_type=reader.read_bytes(reader.read(32)).decode('ascii'),
description=reader.read_bytes(reader.read(32)).decode('utf-8'),
width=reader.read(32),
height=reader.read(32),
color_depth=reader.read(32),
color_count=reader.read(32),
data=reader.read_bytes(reader.read(32)))
def build(self, writer):
"""writes this metadata block to a BitstreamWriter"""
writer.build("32u [ 32u%db ] [32u%db ] 32u 32u 32u 32u [ 32u%db ]" %
(len(self.mime_type.encode('ascii')),
len(self.description.encode('utf-8')),
len(self.data)),
(self.picture_type,
len(self.mime_type.encode('ascii')),
self.mime_type.encode('ascii'),
len(self.description.encode('utf-8')),
self.description.encode('utf-8'),
self.width,
self.height,
self.color_depth,
self.color_count,
len(self.data),
self.data))
def size(self):
"""the size of this metadata block
not including the 4-byte block header"""
from .bitstream import format_size
return format_size(
"32u [ 32u%db ] [32u%db ] 32u 32u 32u 32u [ 32u%db ]" %
(len(self.mime_type.encode('ascii')),
len(self.description.encode('utf-8')),
len(self.data))) / 8
@classmethod
def converted(cls, image):
"""converts an Image object to a FlacPictureComment"""
return cls(
picture_type={4: 0, 0: 3, 1: 4, 2: 5, 3: 6}.get(image.type, 0),
mime_type=image.mime_type,
description=image.description,
width=image.width,
height=image.height,
color_depth=image.color_depth,
color_count=image.color_count,
data=image.data)
def type_string(self):
"""returns the image's type as a human readable plain string
        for example, a picture_type of 3 returns "Cover (front)"
"""
return {0: "Other",
1: "File icon",
2: "Other file icon",
3: "Cover (front)",
4: "Cover (back)",
5: "Leaflet page",
6: "Media",
7: "Lead artist / lead performer / soloist",
8: "Artist / Performer",
9: "Conductor",
10: "Band / Orchestra",
11: "Composer",
12: "Lyricist / Text writer",
13: "Recording Location",
14: "During recording",
15: "During performance",
16: "Movie / Video screen capture",
17: "A bright colored fish",
18: "Illustration",
19: "Band/Artist logotype",
20: "Publisher / Studio logotype"}.get(self.picture_type,
"Other")
def clean(self, fixes_performed):
from .image import image_metrics
img = image_metrics(self.data)
if (((self.mime_type != img.mime_type) or
(self.width != img.width) or
(self.height != img.height) or
(self.color_depth != img.bits_per_pixel) or
(self.color_count != img.color_count))):
from .text import CLEAN_FIX_IMAGE_FIELDS
fixes_performed.append(CLEAN_FIX_IMAGE_FIELDS)
return self.__class__.converted(
Image(type=self.type,
mime_type=img.mime_type,
description=self.description,
width=img.width,
height=img.height,
color_depth=img.bits_per_pixel,
color_count=img.color_count,
data=self.data))
else:
return self
class Flac_APPLICATION:
BLOCK_ID = 2
def __init__(self, application_id, data):
self.application_id = application_id
self.data = data
def __eq__(self, block):
for attr in ["application_id", "data"]:
if ((not hasattr(block, attr)) or (getattr(self, attr) !=
getattr(block, attr))):
return False
else:
return True
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_APPLICATION(self.application_id,
self.data)
def __repr__(self):
return "Flac_APPLICATION(%s, %s)" % (repr(self.application_id),
repr(self.data))
def raw_info(self):
"""returns a human-readable version of this metadata block
as unicode"""
from os import linesep
return u" APPLICATION:%s %s (%d bytes)" % \
(linesep.decode('ascii'),
self.application_id.decode('ascii'),
len(self.data))
@classmethod
def parse(cls, reader, block_length):
"""returns this metadata block from a BitstreamReader"""
return cls(application_id=reader.read_bytes(4),
data=reader.read_bytes(block_length - 4))
def build(self, writer):
"""writes this metadata block to a BitstreamWriter"""
writer.write_bytes(self.application_id)
writer.write_bytes(self.data)
def size(self):
"""the size of this metadata block
not including the 4-byte block header"""
return len(self.application_id) + len(self.data)
class Flac_SEEKTABLE:
BLOCK_ID = 3
def __init__(self, seekpoints):
"""seekpoints is a list of
(PCM frame offset, byte offset, PCM frame count) tuples"""
self.seekpoints = seekpoints
def __eq__(self, block):
if (hasattr(block, "seekpoints")):
return self.seekpoints == block.seekpoints
else:
return False
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_SEEKTABLE(self.seekpoints[:])
def __repr__(self):
return "Flac_SEEKTABLE(%s)" % (repr(self.seekpoints))
def raw_info(self):
"""returns a human-readable version of this metadata block
as unicode"""
from os import linesep
return linesep.decode('ascii').join(
[u" SEEKTABLE:",
u" first sample file offset frame samples"] +
[u" %14.1d %13.1X %15.d" % seekpoint
for seekpoint in self.seekpoints])
@classmethod
def parse(cls, reader, total_seekpoints):
"""returns this metadata block from a BitstreamReader"""
return cls([tuple(reader.parse("64U64U16u"))
for i in xrange(total_seekpoints)])
def build(self, writer):
"""writes this metadata block to a BitstreamWriter"""
for seekpoint in self.seekpoints:
writer.build("64U64U16u", seekpoint)
def size(self):
"""the size of this metadata block
not including the 4-byte block header"""
from .bitstream import format_size
return (format_size("64U64U16u") / 8) * len(self.seekpoints)
def clean(self, fixes_performed):
"""removes any empty seek points
and ensures PCM frame offset and byte offset
are both incrementing"""
nonempty_points = [seekpoint for seekpoint in self.seekpoints
if (seekpoint[2] != 0)]
if (len(nonempty_points) != len(self.seekpoints)):
from .text import CLEAN_FLAC_REMOVE_SEEKPOINTS
fixes_performed.append(CLEAN_FLAC_REMOVE_SEEKPOINTS)
ascending_order = list(set(nonempty_points))
ascending_order.sort()
if (ascending_order != nonempty_points):
from .text import CLEAN_FLAC_REORDER_SEEKPOINTS
fixes_performed.append(CLEAN_FLAC_REORDER_SEEKPOINTS)
return Flac_SEEKTABLE(ascending_order)
class Flac_CUESHEET:
BLOCK_ID = 5
def __init__(self, catalog_number, lead_in_samples, is_cdda, tracks):
self.catalog_number = catalog_number
self.lead_in_samples = lead_in_samples
self.is_cdda = is_cdda
self.tracks = tracks
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_CUESHEET(self.catalog_number,
self.lead_in_samples,
self.is_cdda,
[track.copy() for track in self.tracks])
def __eq__(self, cuesheet):
for attr in ["catalog_number",
"lead_in_samples",
"is_cdda",
"tracks"]:
if ((not hasattr(cuesheet, attr)) or (getattr(self, attr) !=
getattr(cuesheet, attr))):
return False
else:
return True
def __repr__(self):
return ("Flac_CUESHEET(%s)" %
",".join(["%s=%s" % (key, repr(getattr(self, key)))
for key in ["catalog_number",
"lead_in_samples",
"is_cdda",
"tracks"]]))
def raw_info(self):
"""returns a human-readable version of this metadata block
as unicode"""
from os import linesep
return linesep.decode('ascii').join(
[u" CUESHEET:",
u" catalog number = %s" %
(self.catalog_number.decode('ascii', 'replace')),
u" lead-in samples = %d" % (self.lead_in_samples),
u" is CDDA = %d" % (self.is_cdda),
u"%9s %5s %8s %13s %12s" % (u"track",
u"audio",
u"pre-emph",
u"offset",
u"ISRC")] +
[track.raw_info() for track in self.tracks])
@classmethod
def parse(cls, reader):
"""returns this metadata block from a BitstreamReader"""
(catalog_number,
lead_in_samples,
is_cdda,
track_count) = reader.parse("128b64U1u2071p8u")
return cls(catalog_number,
lead_in_samples,
is_cdda,
[Flac_CUESHEET_track.parse(reader)
for i in xrange(track_count)])
def build(self, writer):
"""writes this metadata block to a BitstreamWriter"""
writer.build("128b64U1u2071p8u",
(self.catalog_number,
self.lead_in_samples,
self.is_cdda,
len(self.tracks)))
for track in self.tracks:
track.build(writer)
def size(self):
"""the size of this metadata block
not including the 4-byte block header"""
from .bitstream import BitstreamAccumulator
a = BitstreamAccumulator(0)
self.build(a)
return a.bytes()
@classmethod
def converted(cls, sheet, total_frames, sample_rate=44100):
"""converts a cuesheet compatible object to Flac_CUESHEET objects
a total_frames integer (in PCM frames) is also required
"""
if (sheet.catalog() is None):
catalog_number = chr(0) * 128
else:
catalog_number = sheet.catalog() + (chr(0) *
(128 - len(sheet.catalog())))
ISRCs = sheet.ISRCs()
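        #cuesheet index offsets are in CD frames (1/75th of a second),
        #so convert them to PCM frames via sample_rate / 75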
return cls(
catalog_number=catalog_number,
lead_in_samples=sample_rate * 2,
is_cdda=1 if sample_rate == 44100 else 0,
tracks=[Flac_CUESHEET_track(
offset=indexes[0] * sample_rate / 75,
number=i + 1,
ISRC=ISRCs.get(i + 1, chr(0) * 12),
track_type=0,
pre_emphasis=0,
index_points=[
Flac_CUESHEET_index(
offset=(index - indexes[0]) * sample_rate / 75,
number=point_number + (1 if len(indexes) == 1
else 0))
for (point_number, index) in enumerate(indexes)])
for (i, indexes) in enumerate(sheet.indexes())] +
# lead-out track
[Flac_CUESHEET_track(offset=total_frames,
number=170,
ISRC=chr(0) * 12,
track_type=0,
pre_emphasis=0,
index_points=[])])
def catalog(self):
"""returns the cuesheet's catalog number as a plain string"""
catalog_number = self.catalog_number.rstrip(chr(0))
if (len(catalog_number) > 0):
return catalog_number
else:
return None
def ISRCs(self):
"""returns a dict of ISRC values as plain strings"""
return dict([(track.number, track.ISRC) for track in
self.tracks
if ((track.number != 170) and
(len(track.ISRC.strip(chr(0))) > 0))])
def indexes(self, sample_rate=44100):
"""returns a list of (start, end) integer tuples"""
return [tuple([(index.offset + track.offset) * 75 / sample_rate
for index in
sorted(track.index_points,
lambda i1, i2: cmp(i1.number, i2.number))])
for track in
sorted(self.tracks, lambda t1, t2: cmp(t1.number, t2.number))
if (track.number != 170)]
def pcm_lengths(self, total_length, sample_rate):
"""returns a list of PCM lengths for all cuesheet audio tracks
note that the total_length and sample_rate variables
are only for compatibility
as FLAC's CUESHEET blocks store sample counts directly
"""
if (len(self.tracks) > 0):
return [(current.offset +
max([i.offset for i in current.index_points] + [0])) -
((previous.offset +
max([i.offset for i in previous.index_points] + [0])))
for (previous, current) in
zip(self.tracks, self.tracks[1:])]
else:
return []
class Flac_CUESHEET_track:
def __init__(self, offset, number, ISRC, track_type, pre_emphasis,
index_points):
self.offset = offset
self.number = number
self.ISRC = ISRC
self.track_type = track_type
self.pre_emphasis = pre_emphasis
self.index_points = index_points
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_CUESHEET_track(self.offset,
self.number,
self.ISRC,
self.track_type,
self.pre_emphasis,
[index.copy() for index in
self.index_points])
def __repr__(self):
return ("Flac_CUESHEET_track(%s)" %
",".join(["%s=%s" % (key, repr(getattr(self, key)))
for key in ["offset",
"number",
"ISRC",
"track_type",
"pre_emphasis",
"index_points"]]))
def raw_info(self):
"""returns a human-readable version of this track as unicode"""
if (len(self.ISRC.strip(chr(0))) > 0):
return u"%9.d %5s %8s %13.d %12s" % \
(self.number,
u"yes" if self.track_type == 0 else u"no",
u"yes" if self.pre_emphasis == 1 else u"no",
self.offset,
self.ISRC)
else:
return u"%9.d %5s %8s %13.d" % \
(self.number,
u"yes" if self.track_type == 0 else u"no",
u"yes" if self.pre_emphasis == 1 else u"no",
self.offset)
def __eq__(self, track):
for attr in ["offset",
"number",
"ISRC",
"track_type",
"pre_emphasis",
"index_points"]:
if ((not hasattr(track, attr)) or (getattr(self, attr) !=
getattr(track, attr))):
return False
else:
return True
@classmethod
def parse(cls, reader):
"""returns this cuesheet track from a BitstreamReader"""
(offset,
number,
ISRC,
track_type,
pre_emphasis,
index_points) = reader.parse("64U8u12b1u1u110p8u")
return cls(offset, number, ISRC, track_type, pre_emphasis,
[Flac_CUESHEET_index.parse(reader)
for i in xrange(index_points)])
def build(self, writer):
"""writes this cuesheet track to a BitstreamWriter"""
writer.build("64U8u12b1u1u110p8u",
(self.offset,
self.number,
self.ISRC,
self.track_type,
self.pre_emphasis,
len(self.index_points)))
for index_point in self.index_points:
index_point.build(writer)
class Flac_CUESHEET_index:
def __init__(self, offset, number):
self.offset = offset
self.number = number
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_CUESHEET_index(self.offset, self.number)
def __repr__(self):
return "Flac_CUESHEET_index(%s, %s)" % (repr(self.offset),
repr(self.number))
def __eq__(self, index):
try:
return ((self.offset == index.offset) and
(self.number == index.number))
except AttributeError:
return False
@classmethod
def parse(cls, reader):
"""returns this cuesheet index from a BitstreamReader"""
(offset, number) = reader.parse("64U8u24p")
return cls(offset, number)
def build(self, writer):
"""writes this cuesheet index to a BitstreamWriter"""
writer.build("64U8u24p", (self.offset, self.number))
class Flac_PADDING:
BLOCK_ID = 1
def __init__(self, length):
self.length = length
def copy(self):
"""returns a duplicate of this metadata block"""
return Flac_PADDING(self.length)
def __repr__(self):
return "Flac_PADDING(%d)" % (self.length)
def raw_info(self):
"""returns a human-readable version of this metadata block
as unicode"""
from os import linesep
return linesep.decode('ascii').join(
[u" PADDING:",
u" length = %d" % (self.length)])
@classmethod
def parse(cls, reader, block_length):
"""returns this metadata block from a BitstreamReader"""
reader.skip_bytes(block_length)
return cls(length=block_length)
def build(self, writer):
"""writes this metadata block to a BitstreamWriter"""
writer.write_bytes(chr(0) * self.length)
def size(self):
"""the size of this metadata block
not including the 4-byte block header"""
return self.length
class FlacAudio(WaveContainer, AiffContainer):
"""a Free Lossless Audio Codec file"""
from .text import (COMP_FLAC_0,
COMP_FLAC_8)
SUFFIX = "flac"
NAME = SUFFIX
DESCRIPTION = u"Free Lossless Audio Codec"
DEFAULT_COMPRESSION = "8"
COMPRESSION_MODES = tuple(map(str, range(0, 9)))
COMPRESSION_DESCRIPTIONS = {"0": COMP_FLAC_0,
"8": COMP_FLAC_8}
METADATA_CLASS = FlacMetaData
def __init__(self, filename):
"""filename is a plain string"""
AudioFile.__init__(self, filename)
self.__samplerate__ = 0
self.__channels__ = 0
self.__bitspersample__ = 0
self.__total_frames__ = 0
self.__stream_offset__ = 0
self.__md5__ = chr(0) * 16
try:
self.__read_streaminfo__()
except IOError, msg:
raise InvalidFLAC(str(msg))
def channel_mask(self):
"""returns a ChannelMask object of this track's channel layout"""
from . import ChannelMask
if (self.channels() <= 2):
return ChannelMask.from_channels(self.channels())
try:
metadata = self.get_metadata()
if (metadata is not None):
return ChannelMask(
int(metadata.get_block(
Flac_VORBISCOMMENT.BLOCK_ID)[
u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"][0], 16))
else:
#proceed to generate channel mask
raise ValueError()
except (IndexError, KeyError, ValueError):
#if there is no VORBIS_COMMENT block
#or no WAVEFORMATEXTENSIBLE_CHANNEL_MASK in that block
#or it's not an integer,
#use FLAC's default mask based on channels
if (self.channels() == 3):
return ChannelMask.from_fields(
front_left=True, front_right=True, front_center=True)
elif (self.channels() == 4):
return ChannelMask.from_fields(
front_left=True, front_right=True,
back_left=True, back_right=True)
elif (self.channels() == 5):
return ChannelMask.from_fields(
front_left=True, front_right=True, front_center=True,
back_left=True, back_right=True)
elif (self.channels() == 6):
return ChannelMask.from_fields(
front_left=True, front_right=True, front_center=True,
back_left=True, back_right=True,
low_frequency=True)
else:
return ChannelMask(0)
def lossless(self):
"""returns True"""
return True
def get_metadata(self):
"""returns a MetaData object, or None
raises IOError if unable to read the file"""
#FlacAudio *always* returns a FlacMetaData object
#even if the blocks aren't present
#so there's no need to test for None
f = file(self.filename, 'rb')
try:
f.seek(self.__stream_offset__, 0)
if (f.read(4) != 'fLaC'):
return None
else:
from .bitstream import BitstreamReader
return FlacMetaData.parse(BitstreamReader(f, 0))
finally:
f.close()
def update_metadata(self, metadata):
"""takes this track's current MetaData object
as returned by get_metadata() and sets this track's metadata
with any fields updated in that object
raises IOError if unable to write the file
"""
from .bitstream import BitstreamWriter
from .bitstream import BitstreamAccumulator
from .bitstream import BitstreamReader
from operator import add
if (metadata is None):
return
if (not isinstance(metadata, FlacMetaData)):
from .text import ERR_FOREIGN_METADATA
raise ValueError(ERR_FOREIGN_METADATA)
has_padding = len(metadata.get_blocks(Flac_PADDING.BLOCK_ID)) > 0
if (has_padding):
total_padding_size = sum(
[b.size() for b in metadata.get_blocks(Flac_PADDING.BLOCK_ID)])
else:
total_padding_size = 0
metadata_delta = metadata.size() - self.metadata_length()
if (has_padding and (metadata_delta <= total_padding_size)):
#if padding size is larger than change in metadata
#shrink padding blocks so that new size matches old size
#(if metadata_delta is negative,
# this will enlarge padding blocks as necessary)
for padding in metadata.get_blocks(Flac_PADDING.BLOCK_ID):
if (metadata_delta > 0):
#extract bytes from PADDING blocks
#until the metadata_delta is exhausted
if (metadata_delta <= padding.length):
padding.length -= metadata_delta
metadata_delta = 0
else:
metadata_delta -= padding.length
padding.length = 0
elif (metadata_delta < 0):
#dump all our new bytes into the first PADDING block found
padding.length -= metadata_delta
metadata_delta = 0
else:
break
#then overwrite the beginning of the file
stream = file(self.filename, 'r+b')
stream.write('fLaC')
metadata.build(BitstreamWriter(stream, 0))
stream.close()
else:
#if padding is smaller than change in metadata,
#or file has no padding,
#rewrite entire file to fit new metadata
import tempfile
from . import transfer_data
stream = file(self.filename, 'rb')
stream.seek(self.__stream_offset__, 0)
if (stream.read(4) != 'fLaC'):
from .text import ERR_FLAC_INVALID_FILE
raise InvalidFLAC(ERR_FLAC_INVALID_FILE)
#skip the existing metadata blocks
stop = 0
reader = BitstreamReader(stream, 0)
while (stop == 0):
(stop, length) = reader.parse("1u 7p 24u")
reader.skip_bytes(length)
#write the remaining data stream to a temp file
file_data = tempfile.TemporaryFile()
transfer_data(stream.read, file_data.write)
file_data.seek(0, 0)
#finally, rebuild our file using new metadata and old stream
stream = file(self.filename, 'wb')
stream.write('fLaC')
writer = BitstreamWriter(stream, 0)
metadata.build(writer)
writer.flush()
transfer_data(file_data.read, stream.write)
file_data.close()
stream.close()
def set_metadata(self, metadata):
"""takes a MetaData object and sets this track's metadata
this metadata includes track name, album name, and so on
raises IOError if unable to read or write the file"""
new_metadata = self.METADATA_CLASS.converted(metadata)
if (new_metadata is None):
return
old_metadata = self.get_metadata()
if (old_metadata is None):
#this shouldn't happen
old_metadata = FlacMetaData([])
#replace old metadata's VORBIS_COMMENT with one from new metadata
#(if any)
if (new_metadata.has_block(Flac_VORBISCOMMENT.BLOCK_ID)):
new_vorbiscomment = new_metadata.get_block(
Flac_VORBISCOMMENT.BLOCK_ID)
if (old_metadata.has_block(Flac_VORBISCOMMENT.BLOCK_ID)):
#both new and old metadata has a VORBIS_COMMENT block
old_vorbiscomment = old_metadata.get_block(
Flac_VORBISCOMMENT.BLOCK_ID)
#update vendor string from our current VORBIS_COMMENT block
new_vorbiscomment.vendor_string = \
old_vorbiscomment.vendor_string
#update REPLAYGAIN_* tags from our current VORBIS_COMMENT block
for key in [u"REPLAYGAIN_TRACK_GAIN",
u"REPLAYGAIN_TRACK_PEAK",
u"REPLAYGAIN_ALBUM_GAIN",
u"REPLAYGAIN_ALBUM_PEAK",
u"REPLAYGAIN_REFERENCE_LOUDNESS"]:
try:
new_vorbiscomment[key] = old_vorbiscomment[key]
except KeyError:
new_vorbiscomment[key] = []
#update WAVEFORMATEXTENSIBLE_CHANNEL_MASK
#from our current VORBIS_COMMENT block, if any
if (((self.channels() > 2) or
(self.bits_per_sample() > 16)) and
(u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK" in
old_vorbiscomment.keys())):
new_vorbiscomment[u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"] = \
old_vorbiscomment[u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"]
elif (u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK" in
new_vorbiscomment.keys()):
new_vorbiscomment[
u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"] = []
old_metadata.replace_blocks(Flac_VORBISCOMMENT.BLOCK_ID,
[new_vorbiscomment])
else:
#new metadata has VORBIS_COMMENT block,
#but old metadata does not
#remove REPLAYGAIN_* tags from new VORBIS_COMMENT block
for key in [u"REPLAYGAIN_TRACK_GAIN",
u"REPLAYGAIN_TRACK_PEAK",
u"REPLAYGAIN_ALBUM_GAIN",
u"REPLAYGAIN_ALBUM_PEAK",
u"REPLAYGAIN_REFERENCE_LOUDNESS"]:
new_vorbiscomment[key] = []
#update WAVEFORMATEXTENSIBLE_CHANNEL_MASK
#from our actual mask if necessary
if ((self.channels() > 2) or (self.bits_per_sample() > 16)):
new_vorbiscomment[u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"] = [
u"0x%.4X" % (self.channel_mask())]
old_metadata.add_block(new_vorbiscomment)
else:
#new metadata has no VORBIS_COMMENT block
pass
#replace old metadata's PICTURE blocks with those from new metadata
old_metadata.replace_blocks(
Flac_PICTURE.BLOCK_ID,
new_metadata.get_blocks(Flac_PICTURE.BLOCK_ID))
#everything else remains as-is
self.update_metadata(old_metadata)
def metadata_length(self):
"""returns the length of all FLAC metadata blocks as an integer
not including the 4 byte "fLaC" file header"""
from .bitstream import BitstreamReader
counter = 0
f = file(self.filename, 'rb')
try:
f.seek(self.__stream_offset__, 0)
reader = BitstreamReader(f, 0)
if (reader.read_bytes(4) != 'fLaC'):
from .text import ERR_FLAC_INVALID_FILE
raise InvalidFLAC(ERR_FLAC_INVALID_FILE)
stop = 0
while (stop == 0):
(stop, block_id, length) = reader.parse("1u 7u 24u")
counter += 4
reader.skip_bytes(length)
counter += length
return counter
finally:
f.close()
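    # Each block header parsed above is "1u 7u 24u": a 1 bit last-block
    # flag, a 7 bit block type and a 24 bit body length, i.e. exactly
    # 4 bytes, which is why the counter adds 4 per block before adding
    # the body length.  As a hypothetical example, a VORBIS_COMMENT
    # whose body is 58 bytes contributes 62 bytes to the total.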
def delete_metadata(self):
"""deletes the track's MetaData
this removes or unsets tags as necessary in order to remove all data
raises IOError if unable to write the file"""
self.set_metadata(MetaData())
@classmethod
def __block_ids__(cls, flacfile):
"""yields a block_id int per metadata block
raises ValueError if a block_id is invalid
"""
valid_block_ids = frozenset(range(0, 6 + 1))
from .bitstream import BitstreamReader
reader = BitstreamReader(flacfile, 0)
stop = 0
while (stop == 0):
(stop, block_id, length) = reader.parse("1u 7u 24u")
if (block_id in valid_block_ids):
yield block_id
else:
from .text import ERR_FLAC_INVALID_BLOCK
raise ValueError(ERR_FLAC_INVALID_BLOCK)
reader.skip_bytes(length)
def set_cuesheet(self, cuesheet):
"""imports cuesheet data from a Cuesheet-compatible object
        these are objects with catalog(), ISRCs(), indexes(), and
        pcm_lengths() methods.  Raises IOError if an error occurs
        setting the cuesheet"""
if (cuesheet is not None):
metadata = self.get_metadata()
if (metadata is not None):
metadata.add_block(
Flac_CUESHEET.converted(
cuesheet, self.total_frames(), self.sample_rate()))
self.update_metadata(metadata)
def get_cuesheet(self):
"""returns the embedded Cuesheet-compatible object, or None
raises IOError if a problem occurs when reading the file"""
try:
metadata = self.get_metadata()
if (metadata is not None):
return metadata.get_block(Flac_CUESHEET.BLOCK_ID)
else:
return None
except IndexError:
return None
def to_pcm(self):
"""returns a PCMReader object containing the track's PCM data"""
from . import decoders
from . import PCMReaderError
try:
return decoders.FlacDecoder(self.filename,
self.channel_mask(),
self.__stream_offset__)
except (IOError, ValueError), msg:
#The only time this is likely to occur is
#if the FLAC is modified between when FlacAudio
#is initialized and when to_pcm() is called.
return PCMReaderError(error_message=str(msg),
sample_rate=self.sample_rate(),
channels=self.channels(),
channel_mask=int(self.channel_mask()),
bits_per_sample=self.bits_per_sample())
@classmethod
def from_pcm(cls, filename, pcmreader, compression=None,
encoding_function=None):
"""encodes a new file from PCM data
takes a filename string, PCMReader object
and optional compression level string
encodes a new audio file from pcmreader's data
at the given filename with the specified compression level
and returns a new FlacAudio object"""
from .encoders import encode_flac
from . import EncodingError
from . import UnsupportedChannelCount
from . import BufferedPCMReader
from . import __default_quality__
if ((compression is None) or (compression not in
cls.COMPRESSION_MODES)):
compression = __default_quality__(cls.NAME)
encoding_options = {
"0": {"block_size": 1152,
"max_lpc_order": 0,
"min_residual_partition_order": 0,
"max_residual_partition_order": 3},
"1": {"block_size": 1152,
"max_lpc_order": 0,
"adaptive_mid_side": True,
"min_residual_partition_order": 0,
"max_residual_partition_order": 3},
"2": {"block_size": 1152,
"max_lpc_order": 0,
"exhaustive_model_search": True,
"min_residual_partition_order": 0,
"max_residual_partition_order": 3},
"3": {"block_size": 4096,
"max_lpc_order": 6,
"min_residual_partition_order": 0,
"max_residual_partition_order": 4},
"4": {"block_size": 4096,
"max_lpc_order": 8,
"adaptive_mid_side": True,
"min_residual_partition_order": 0,
"max_residual_partition_order": 4},
"5": {"block_size": 4096,
"max_lpc_order": 8,
"mid_side": True,
"min_residual_partition_order": 0,
"max_residual_partition_order": 5},
"6": {"block_size": 4096,
"max_lpc_order": 8,
"mid_side": True,
"min_residual_partition_order": 0,
"max_residual_partition_order": 6},
"7": {"block_size": 4096,
"max_lpc_order": 8,
"mid_side": True,
"exhaustive_model_search": True,
"min_residual_partition_order": 0,
"max_residual_partition_order": 6},
"8": {"block_size": 4096,
"max_lpc_order": 12,
"mid_side": True,
"exhaustive_model_search": True,
"min_residual_partition_order": 0,
"max_residual_partition_order": 6}}[compression]
if (pcmreader.channels > 8):
raise UnsupportedChannelCount(filename, pcmreader.channels)
if (int(pcmreader.channel_mask) == 0):
if (pcmreader.channels <= 6):
channel_mask = {1: 0x0004,
2: 0x0003,
3: 0x0007,
4: 0x0033,
5: 0x0037,
6: 0x003F}[pcmreader.channels]
else:
channel_mask = 0
elif (int(pcmreader.channel_mask) not in
            (0x0001,  # 1ch - front left (mono)
             0x0004,  # 1ch - front center (mono)
0x0003, # 2ch - left, right
0x0007, # 3ch - left, right, center
0x0033, # 4ch - left, right, back left, back right
0x0603, # 4ch - left, right, side left, side right
0x0037, # 5ch - L, R, C, back left, back right
0x0607, # 5ch - L, R, C, side left, side right
0x003F, # 6ch - L, R, C, LFE, back left, back right
0x060F)): # 6ch - L, R, C, LFE, side left, side right
from . import UnsupportedChannelMask
raise UnsupportedChannelMask(filename,
int(pcmreader.channel_mask))
else:
channel_mask = int(pcmreader.channel_mask)
try:
offsets = (encode_flac if encoding_function is None
else encoding_function)(filename,
pcmreader=
BufferedPCMReader(pcmreader),
**encoding_options)
flac = FlacAudio(filename)
metadata = flac.get_metadata()
assert(metadata is not None)
#generate SEEKTABLE from encoder offsets and add it to metadata
seekpoint_interval = pcmreader.sample_rate * 10
metadata.add_block(
flac.seektable(
[(byte_offset,
pcm_frames) for byte_offset, pcm_frames in offsets],
seekpoint_interval))
#if channels or bps is too high,
#automatically generate and add channel mask
if ((((pcmreader.channels > 2) or
(pcmreader.bits_per_sample > 16)) and
(channel_mask != 0))):
vorbis = metadata.get_block(Flac_VORBISCOMMENT.BLOCK_ID)
vorbis[u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"] = [
u"0x%.4X" % (channel_mask)]
flac.update_metadata(metadata)
return flac
except (IOError, ValueError), err:
cls.__unlink__(filename)
raise EncodingError(str(err))
except Exception, err:
cls.__unlink__(filename)
raise err
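    # Hypothetical usage sketch ("track.flac" and pcmreader are
    # placeholders, not part of this module):
    #
    #     flac = FlacAudio.from_pcm("track.flac", pcmreader, compression="8")
    #
    # which encodes with the highest preset above (block size 4096,
    # max LPC order 12, mid-side and exhaustive model search) and
    # attaches a SEEKTABLE with one seekpoint per 10 seconds of audio.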
def seektable(self, offsets=None, seekpoint_interval=None):
"""returns a new Flac_SEEKTABLE object
created from parsing the FLAC file itself"""
from bisect import bisect_right
if (offsets is None):
metadata_length = (self.__stream_offset__ +
4 + self.metadata_length())
offsets = [(byte_offset - metadata_length,
pcm_frames) for byte_offset, pcm_frames in
self.to_pcm().offsets()]
if (seekpoint_interval is None):
seekpoint_interval = self.sample_rate() * 10
total_samples = 0
all_frames = {}
sample_offsets = []
for (byte_offset, pcm_frames) in offsets:
all_frames[total_samples] = (byte_offset, pcm_frames)
sample_offsets.append(total_samples)
total_samples += pcm_frames
seekpoints = []
for pcm_frame in xrange(0,
self.total_frames(),
seekpoint_interval):
flac_frame = bisect_right(sample_offsets, pcm_frame) - 1
seekpoints.append((sample_offsets[flac_frame],
all_frames[sample_offsets[flac_frame]][0],
all_frames[sample_offsets[flac_frame]][1]))
return Flac_SEEKTABLE(seekpoints)
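    # Worked example of the interval logic above, assuming 44100Hz input:
    # seekpoint_interval defaults to sample_rate * 10 = 441000 samples,
    # so for each multiple of 441000 below total_frames(), bisect_right
    # locates the FLAC frame whose first sample is at or before that
    # target and emits its (sample offset, byte offset, frame length)
    # triple as one SEEKTABLE entry.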
def has_foreign_wave_chunks(self):
"""returns True if the audio file contains non-audio RIFF chunks
during transcoding, if the source audio file has foreign RIFF chunks
and the target audio format supports foreign RIFF chunks,
conversion should be routed through .wav conversion
to avoid losing those chunks"""
try:
metadata = self.get_metadata()
if (metadata is not None):
return 'riff' in [
block.application_id for block in
metadata.get_blocks(Flac_APPLICATION.BLOCK_ID)]
else:
return False
except IOError:
return False
def wave_header_footer(self):
"""returns (header, footer) tuple of strings
containing all data before and after the PCM stream"""
from .wav import pad_data
header = []
if (pad_data(self.total_frames(),
self.channels(),
self.bits_per_sample())):
footer = [chr(0)]
else:
footer = []
current_block = header
metadata = self.get_metadata()
if (metadata is None):
#FIXME
raise ValueError("no foreign RIFF chunks")
#convert individual chunks into combined header and footer strings
for block in metadata.get_blocks(Flac_APPLICATION.BLOCK_ID):
if (block.application_id == "riff"):
chunk_id = block.data[0:4]
#combine APPLICATION metadata blocks up to "data" as header
if (chunk_id != "data"):
current_block.append(block.data)
else:
#combine APPLICATION metadata blocks past "data" as footer
current_block.append(block.data)
current_block = footer
#return tuple of header and footer
if ((len(header) != 0) or (len(footer) != 0)):
return ("".join(header), "".join(footer))
else:
#FIXME
raise ValueError("no foreign RIFF chunks")
@classmethod
def from_wave(cls, filename, header, pcmreader, footer, compression=None):
"""encodes a new file from wave data
takes a filename string, header string,
PCMReader object, footer string
and optional compression level string
encodes a new audio file from pcmreader's data
at the given filename with the specified compression level
        and returns a new FlacAudio object
may raise EncodingError if some problem occurs when
encoding the input file"""
from .bitstream import BitstreamReader
from .bitstream import BitstreamRecorder
from .bitstream import format_byte_size
import cStringIO
from .wav import (pad_data, WaveAudio)
from . import (EncodingError, CounterPCMReader)
#split header and footer into distinct chunks
header_len = len(header)
footer_len = len(footer)
fmt_found = False
blocks = []
try:
#read everything from start of header to "data<size>"
#chunk header
r = BitstreamReader(cStringIO.StringIO(header), 1)
(riff, remaining_size, wave) = r.parse("4b 32u 4b")
if (riff != "RIFF"):
from .text import ERR_WAV_NOT_WAVE
raise EncodingError(ERR_WAV_NOT_WAVE)
elif (wave != "WAVE"):
from .text import ERR_WAV_INVALID_WAVE
raise EncodingError(ERR_WAV_INVALID_WAVE)
else:
block_data = BitstreamRecorder(1)
block_data.build("4b 32u 4b", (riff, remaining_size, wave))
blocks.append(Flac_APPLICATION("riff", block_data.data()))
total_size = remaining_size + 8
header_len -= format_byte_size("4b 32u 4b")
while (header_len):
block_data = BitstreamRecorder(1)
(chunk_id, chunk_size) = r.parse("4b 32u")
#ensure chunk ID is valid
if (not frozenset(chunk_id).issubset(
WaveAudio.PRINTABLE_ASCII)):
from .text import ERR_WAV_INVALID_CHUNK
raise EncodingError(ERR_WAV_INVALID_CHUNK)
else:
header_len -= format_byte_size("4b 32u")
block_data.build("4b 32u", (chunk_id, chunk_size))
if (chunk_id == "data"):
#transfer only "data" chunk header to APPLICATION block
if (header_len != 0):
from .text import ERR_WAV_HEADER_EXTRA_DATA
raise EncodingError(ERR_WAV_HEADER_EXTRA_DATA %
(header_len))
elif (not fmt_found):
from .text import ERR_WAV_NO_FMT_CHUNK
raise EncodingError(ERR_WAV_NO_FMT_CHUNK)
else:
blocks.append(
Flac_APPLICATION("riff", block_data.data()))
data_chunk_size = chunk_size
break
elif (chunk_id == "fmt "):
if (not fmt_found):
fmt_found = True
if (chunk_size % 2):
#transfer padded chunk to APPLICATION block
block_data.write_bytes(
r.read_bytes(chunk_size + 1))
header_len -= (chunk_size + 1)
else:
#transfer un-padded chunk to APPLICATION block
block_data.write_bytes(
r.read_bytes(chunk_size))
header_len -= chunk_size
blocks.append(
Flac_APPLICATION("riff", block_data.data()))
else:
from .text import ERR_WAV_MULTIPLE_FMT
raise EncodingError(ERR_WAV_MULTIPLE_FMT)
else:
if (chunk_size % 2):
#transfer padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size + 1))
header_len -= (chunk_size + 1)
else:
#transfer un-padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size))
header_len -= chunk_size
blocks.append(Flac_APPLICATION("riff", block_data.data()))
else:
from .text import ERR_WAV_NO_DATA_CHUNK
raise EncodingError(ERR_WAV_NO_DATA_CHUNK)
except IOError:
from .text import ERR_WAV_HEADER_IOERROR
raise EncodingError(ERR_WAV_HEADER_IOERROR)
try:
#read everything from start of footer to end of footer
r = BitstreamReader(cStringIO.StringIO(footer), 1)
#skip initial footer pad byte
if (data_chunk_size % 2):
r.skip_bytes(1)
footer_len -= 1
while (footer_len):
block_data = BitstreamRecorder(1)
(chunk_id, chunk_size) = r.parse("4b 32u")
if (not frozenset(chunk_id).issubset(
WaveAudio.PRINTABLE_ASCII)):
#ensure chunk ID is valid
from .text import ERR_WAV_INVALID_CHUNK
raise EncodingError(ERR_WAV_INVALID_CHUNK)
elif (chunk_id == "fmt "):
#multiple "fmt " chunks is an error
from .text import ERR_WAV_MULTIPLE_FMT
raise EncodingError(ERR_WAV_MULTIPLE_FMT)
elif (chunk_id == "data"):
#multiple "data" chunks is an error
from .text import ERR_WAV_MULTIPLE_DATA
raise EncodingError(ERR_WAV_MULTIPLE_DATA)
else:
footer_len -= format_byte_size("4b 32u")
block_data.build("4b 32u", (chunk_id, chunk_size))
if (chunk_size % 2):
#transfer padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size + 1))
footer_len -= (chunk_size + 1)
else:
#transfer un-padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size))
footer_len -= chunk_size
blocks.append(Flac_APPLICATION("riff", block_data.data()))
except IOError:
from .text import ERR_WAV_FOOTER_IOERROR
raise EncodingError(ERR_WAV_FOOTER_IOERROR)
counter = CounterPCMReader(pcmreader)
#perform standard FLAC encode from PCMReader
flac = cls.from_pcm(filename, counter, compression)
data_bytes_written = counter.bytes_written()
#ensure processed PCM data equals size of "data" chunk
if (data_bytes_written != data_chunk_size):
cls.__unlink__(filename)
from .text import ERR_WAV_TRUNCATED_DATA_CHUNK
raise EncodingError(ERR_WAV_TRUNCATED_DATA_CHUNK)
#ensure total size of header + PCM + footer matches wav's header
if ((len(header) + data_bytes_written + len(footer)) != total_size):
cls.__unlink__(filename)
from .text import ERR_WAV_INVALID_SIZE
raise EncodingError(ERR_WAV_INVALID_SIZE)
#add chunks as APPLICATION metadata blocks
metadata = flac.get_metadata()
if (metadata is not None):
for block in blocks:
metadata.add_block(block)
flac.update_metadata(metadata)
#return encoded FLAC file
return flac
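    # Hedged round-trip sketch: for a FLAC carrying foreign RIFF chunks,
    # the header/footer from wave_header_footer() plus the decoded PCM
    # should rebuild the original .wav byte-for-byte, e.g.
    #
    #     header, footer = flac.wave_header_footer()
    #     FlacAudio.from_wave("copy.flac", header, flac.to_pcm(), footer)
    #
    # ("copy.flac" is a placeholder output name).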
def has_foreign_aiff_chunks(self):
"""returns True if the audio file contains non-audio AIFF chunks"""
try:
metadata = self.get_metadata()
if (metadata is not None):
return 'aiff' in [
block.application_id for block in
metadata.get_blocks(Flac_APPLICATION.BLOCK_ID)]
else:
return False
except IOError:
return False
def aiff_header_footer(self):
"""returns (header, footer) tuple of strings
containing all data before and after the PCM stream
        raises ValueError if self.has_foreign_aiff_chunks() is False,
        since the file then has no header and footer to return"""
from .aiff import pad_data
header = []
if (pad_data(self.total_frames(),
self.channels(),
self.bits_per_sample())):
footer = [chr(0)]
else:
footer = []
current_block = header
metadata = self.get_metadata()
if (metadata is None):
#FIXME
raise ValueError("no foreign AIFF chunks")
#convert individual chunks into combined header and footer strings
for block in metadata.get_blocks(Flac_APPLICATION.BLOCK_ID):
if (block.application_id == "aiff"):
chunk_id = block.data[0:4]
#combine APPLICATION metadata blocks up to "SSND" as header
if (chunk_id != "SSND"):
current_block.append(block.data)
else:
#combine APPLICATION metadata blocks past "SSND" as footer
current_block.append(block.data)
current_block = footer
#return tuple of header and footer
if ((len(header) != 0) or (len(footer) != 0)):
return ("".join(header), "".join(footer))
else:
#FIXME
raise ValueError("no foreign AIFF chunks")
@classmethod
def from_aiff(cls, filename, header, pcmreader, footer, compression=None):
"""encodes a new file from AIFF data
takes a filename string, header string,
PCMReader object, footer string
and optional compression level string
encodes a new audio file from pcmreader's data
at the given filename with the specified compression level
        and returns a new FlacAudio object
header + pcm data + footer should always result
in the original AIFF file being restored
without need for any padding bytes
may raise EncodingError if some problem occurs when
encoding the input file"""
from .bitstream import BitstreamReader
from .bitstream import BitstreamRecorder
from .bitstream import format_byte_size
import cStringIO
from .aiff import (pad_data, AiffAudio)
from . import (EncodingError, CounterPCMReader)
#split header and footer into distinct chunks
header_len = len(header)
footer_len = len(footer)
comm_found = False
blocks = []
try:
#read everything from start of header to "SSND<size>"
#chunk header
r = BitstreamReader(cStringIO.StringIO(header), 0)
(form, remaining_size, aiff) = r.parse("4b 32u 4b")
if (form != "FORM"):
from .text import ERR_AIFF_NOT_AIFF
raise EncodingError(ERR_AIFF_NOT_AIFF)
elif (aiff != "AIFF"):
from .text import ERR_AIFF_INVALID_AIFF
raise EncodingError(ERR_AIFF_INVALID_AIFF)
else:
block_data = BitstreamRecorder(0)
block_data.build("4b 32u 4b", (form, remaining_size, aiff))
blocks.append(Flac_APPLICATION("aiff", block_data.data()))
total_size = remaining_size + 8
header_len -= format_byte_size("4b 32u 4b")
while (header_len):
block_data = BitstreamRecorder(0)
(chunk_id, chunk_size) = r.parse("4b 32u")
#ensure chunk ID is valid
if (not frozenset(chunk_id).issubset(
AiffAudio.PRINTABLE_ASCII)):
from .text import ERR_AIFF_INVALID_CHUNK
raise EncodingError(ERR_AIFF_INVALID_CHUNK)
else:
header_len -= format_byte_size("4b 32u")
block_data.build("4b 32u", (chunk_id, chunk_size))
if (chunk_id == "SSND"):
#transfer only "SSND" chunk header to APPLICATION block
#(including 8 bytes after ID/size header)
if (header_len > 8):
from .text import ERR_AIFF_HEADER_EXTRA_SSND
raise EncodingError(ERR_AIFF_HEADER_EXTRA_SSND)
elif (header_len < 8):
from .text import ERR_AIFF_HEADER_MISSING_SSND
raise EncodingError(ERR_AIFF_HEADER_MISSING_SSND)
elif (not comm_found):
from .text import ERR_AIFF_NO_COMM_CHUNK
raise EncodingError(ERR_AIFF_NO_COMM_CHUNK)
else:
block_data.write_bytes(r.read_bytes(8))
blocks.append(
Flac_APPLICATION("aiff", block_data.data()))
ssnd_chunk_size = (chunk_size - 8)
break
elif (chunk_id == "COMM"):
if (not comm_found):
comm_found = True
if (chunk_size % 2):
#transfer padded chunk to APPLICATION block
block_data.write_bytes(
r.read_bytes(chunk_size + 1))
header_len -= (chunk_size + 1)
else:
#transfer un-padded chunk to APPLICATION block
block_data.write_bytes(
r.read_bytes(chunk_size))
header_len -= chunk_size
blocks.append(
Flac_APPLICATION("aiff", block_data.data()))
else:
from .text import ERR_AIFF_MULTIPLE_COMM_CHUNKS
raise EncodingError(ERR_AIFF_MULTIPLE_COMM_CHUNKS)
else:
if (chunk_size % 2):
#transfer padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size + 1))
header_len -= (chunk_size + 1)
else:
#transfer un-padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size))
header_len -= chunk_size
blocks.append(Flac_APPLICATION("aiff", block_data.data()))
else:
from .text import ERR_AIFF_NO_SSND_CHUNK
raise EncodingError(ERR_AIFF_NO_SSND_CHUNK)
except IOError:
from .text import ERR_AIFF_HEADER_IOERROR
raise EncodingError(ERR_AIFF_HEADER_IOERROR)
try:
#read everything from start of footer to end of footer
r = BitstreamReader(cStringIO.StringIO(footer), 0)
#skip initial footer pad byte
if (ssnd_chunk_size % 2):
r.skip_bytes(1)
footer_len -= 1
while (footer_len):
block_data = BitstreamRecorder(0)
(chunk_id, chunk_size) = r.parse("4b 32u")
if (not frozenset(chunk_id).issubset(
AiffAudio.PRINTABLE_ASCII)):
#ensure chunk ID is valid
from .text import ERR_AIFF_INVALID_CHUNK
raise EncodingError(ERR_AIFF_INVALID_CHUNK)
elif (chunk_id == "COMM"):
#multiple "COMM" chunks is an error
from .text import ERR_AIFF_MULTIPLE_COMM_CHUNKS
raise EncodingError(ERR_AIFF_MULTIPLE_COMM_CHUNKS)
elif (chunk_id == "SSND"):
#multiple "SSND" chunks is an error
from .text import ERR_AIFF_MULTIPLE_SSND_CHUNKS
raise EncodingError(ERR_AIFF_MULTIPLE_SSND_CHUNKS)
else:
footer_len -= format_byte_size("4b 32u")
block_data.build("4b 32u", (chunk_id, chunk_size))
if (chunk_size % 2):
#transfer padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size + 1))
footer_len -= (chunk_size + 1)
else:
#transfer un-padded chunk to APPLICATION block
block_data.write_bytes(r.read_bytes(chunk_size))
footer_len -= chunk_size
blocks.append(Flac_APPLICATION("aiff", block_data.data()))
except IOError:
from .text import ERR_AIFF_FOOTER_IOERROR
raise EncodingError(ERR_AIFF_FOOTER_IOERROR)
counter = CounterPCMReader(pcmreader)
#perform standard FLAC encode from PCMReader
flac = cls.from_pcm(filename, counter, compression)
ssnd_bytes_written = counter.bytes_written()
#ensure processed PCM data equals size of "SSND" chunk
if (ssnd_bytes_written != ssnd_chunk_size):
cls.__unlink__(filename)
from .text import ERR_AIFF_TRUNCATED_SSND_CHUNK
raise EncodingError(ERR_AIFF_TRUNCATED_SSND_CHUNK)
#ensure total size of header + PCM + footer matches aiff's header
if ((len(header) + ssnd_bytes_written + len(footer)) != total_size):
cls.__unlink__(filename)
from .text import ERR_AIFF_INVALID_SIZE
raise EncodingError(ERR_AIFF_INVALID_SIZE)
#add chunks as APPLICATION metadata blocks
metadata = flac.get_metadata()
if (metadata is not None):
for block in blocks:
metadata.add_block(block)
flac.update_metadata(metadata)
#return encoded FLAC file
return flac
def convert(self, target_path, target_class, compression=None,
progress=None):
"""encodes a new AudioFile from existing AudioFile
take a filename string, target class and optional compression string
encodes a new AudioFile in the target class and returns
the resulting object
may raise EncodingError if some problem occurs during encoding"""
#If a FLAC has embedded RIFF *and* embedded AIFF chunks,
#RIFF takes precedence if the target format supports both.
#It's hard to envision a scenario in which that would happen.
import tempfile
from . import WaveAudio
from . import AiffAudio
from . import to_pcm_progress
if ((self.has_foreign_wave_chunks() and
hasattr(target_class, "from_wave") and
callable(target_class.from_wave))):
return WaveContainer.convert(self,
target_path,
target_class,
compression,
progress)
elif (self.has_foreign_aiff_chunks() and
hasattr(target_class, "from_aiff") and
callable(target_class.from_aiff)):
return AiffContainer.convert(self,
target_path,
target_class,
compression,
progress)
else:
return target_class.from_pcm(target_path,
to_pcm_progress(self, progress),
compression)
def bits_per_sample(self):
"""returns an integer number of bits-per-sample this track contains"""
return self.__bitspersample__
def channels(self):
"""returns an integer number of channels this track contains"""
return self.__channels__
def total_frames(self):
"""returns the total PCM frames of the track as an integer"""
return self.__total_frames__
def sample_rate(self):
"""returns the rate of the track's audio as an integer number of Hz"""
return self.__samplerate__
def __read_streaminfo__(self):
valid_header_types = frozenset(range(0, 6 + 1))
f = file(self.filename, "rb")
try:
self.__stream_offset__ = skip_id3v2_comment(f)
f.read(4)
from .bitstream import BitstreamReader
reader = BitstreamReader(f, 0)
stop = 0
while (stop == 0):
(stop, header_type, length) = reader.parse("1u 7u 24u")
if (header_type not in valid_header_types):
from .text import ERR_FLAC_INVALID_BLOCK
raise InvalidFLAC(ERR_FLAC_INVALID_BLOCK)
elif (header_type == 0):
(self.__samplerate__,
self.__channels__,
self.__bitspersample__,
self.__total_frames__,
self.__md5__) = reader.parse("80p 20u 3u 5u 36U 16b")
self.__channels__ += 1
self.__bitspersample__ += 1
break
else:
#though the STREAMINFO should always be first,
#we'll be permissive and check them all if necessary
reader.skip_bytes(length)
finally:
f.close()
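    # The "80p 20u 3u 5u 36U 16b" format above mirrors the FLAC
    # STREAMINFO layout: 80 padding bits skip the minimum/maximum block
    # sizes (16 bits each) and minimum/maximum frame sizes (24 bits each),
    # followed by the 20 bit sample rate, 3 bit channels-1, 5 bit
    # bits-per-sample-1, 36 bit total sample count and 16 byte MD5 sum,
    # hence the +1 adjustments to channels and bits-per-sample afterward.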
@classmethod
def can_add_replay_gain(cls, audiofiles):
"""given a list of audiofiles,
returns True if this class can add ReplayGain to those files
returns False if not"""
for audiofile in audiofiles:
if (not isinstance(audiofile, FlacAudio)):
return False
else:
return True
@classmethod
def add_replay_gain(cls, filenames, progress=None):
"""adds ReplayGain values to a list of filename strings
all the filenames must be of this AudioFile type
raises ValueError if some problem occurs during ReplayGain application
"""
from . import open_files
from . import calculate_replay_gain
tracks = [track for track in open_files(filenames) if
isinstance(track, cls)]
if (len(tracks) > 0):
for (track,
track_gain,
track_peak,
album_gain,
album_peak) in calculate_replay_gain(tracks, progress):
try:
metadata = track.get_metadata()
if (metadata is None):
return
except IOError:
return
try:
comment = metadata.get_block(
Flac_VORBISCOMMENT.BLOCK_ID)
except IndexError:
comment = Flac_VORBISCOMMENT(
[], u"Python Audio Tools %s" % (VERSION))
metadata.add_block(comment)
comment["REPLAYGAIN_TRACK_GAIN"] = [
"%1.2f dB" % (track_gain)]
comment["REPLAYGAIN_TRACK_PEAK"] = [
"%1.8f" % (track_peak)]
comment["REPLAYGAIN_ALBUM_GAIN"] = [
"%1.2f dB" % (album_gain)]
comment["REPLAYGAIN_ALBUM_PEAK"] = ["%1.8f" % (album_peak)]
comment["REPLAYGAIN_REFERENCE_LOUDNESS"] = [u"89.0 dB"]
track.update_metadata(metadata)
@classmethod
def supports_replay_gain(cls):
"""returns True if this class supports ReplayGain"""
return True
@classmethod
def lossless_replay_gain(cls):
"""returns True"""
return True
def replay_gain(self):
"""returns a ReplayGain object of our ReplayGain values
returns None if we have no values"""
from . import ReplayGain
try:
metadata = self.get_metadata()
if (metadata is not None):
vorbis_metadata = metadata.get_block(
Flac_VORBISCOMMENT.BLOCK_ID)
else:
return None
except (IndexError, IOError):
return None
if (set(['REPLAYGAIN_TRACK_PEAK', 'REPLAYGAIN_TRACK_GAIN',
'REPLAYGAIN_ALBUM_PEAK', 'REPLAYGAIN_ALBUM_GAIN']).issubset(
[key.upper() for key in vorbis_metadata.keys()])):
# we have ReplayGain data
try:
return ReplayGain(
vorbis_metadata['REPLAYGAIN_TRACK_GAIN'][0][0:-len(" dB")],
vorbis_metadata['REPLAYGAIN_TRACK_PEAK'][0],
vorbis_metadata['REPLAYGAIN_ALBUM_GAIN'][0][0:-len(" dB")],
vorbis_metadata['REPLAYGAIN_ALBUM_PEAK'][0])
except ValueError:
return None
else:
return None
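    # Illustrative tag values (not taken from any real file): a comment
    # block containing
    #     REPLAYGAIN_TRACK_GAIN=-6.18 dB
    #     REPLAYGAIN_TRACK_PEAK=0.99996948
    # has the trailing " dB" sliced off the gain fields above before the
    # four strings reach ReplayGain(); if any value fails numeric
    # conversion, the ValueError handler returns None instead.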
def __eq__(self, audiofile):
if (isinstance(audiofile, FlacAudio)):
return self.__md5__ == audiofile.__md5__
elif (isinstance(audiofile, AudioFile)):
from . import FRAMELIST_SIZE
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
p = audiofile.to_pcm()
m = md5()
s = p.read(FRAMELIST_SIZE)
while (len(s) > 0):
m.update(s.to_bytes(False, True))
s = p.read(FRAMELIST_SIZE)
p.close()
return m.digest() == self.__md5__
else:
return False
def clean(self, fixes_performed, output_filename=None):
"""cleans the file of known data and metadata problems
fixes_performed is a list-like object which is appended
with Unicode strings of fixed problems
output_filename is an optional filename of the fixed file
if present, a new AudioFile is returned
otherwise, only a dry-run is performed and no new file is written
raises IOError if unable to write the file or its metadata
"""
import os.path
from . import VERSION
def seektable_valid(seektable, metadata_offset, input_file):
from .bitstream import BitstreamReader
reader = BitstreamReader(input_file, 0)
for (pcm_frame_offset,
seekpoint_offset,
pcm_frame_count) in seektable.seekpoints:
input_file.seek(seekpoint_offset + metadata_offset)
try:
(sync_code,
reserved1,
reserved2) = reader.parse(
"14u 1u 1p 4p 4p 4p 3p 1u")
if (((sync_code != 0x3FFE) or
(reserved1 != 0) or
(reserved2 != 0))):
return False
except IOError:
return False
else:
return True
if (output_filename is None):
#dry run only
input_f = open(self.filename, "rb")
try:
#remove ID3 tags from before and after FLAC stream
stream_offset = skip_id3v2_comment(input_f)
if (stream_offset > 0):
from .text import CLEAN_FLAC_REMOVE_ID3V2
fixes_performed.append(CLEAN_FLAC_REMOVE_ID3V2)
try:
input_f.seek(-128, 2)
if (input_f.read(3) == 'TAG'):
from .text import CLEAN_FLAC_REMOVE_ID3V1
fixes_performed.append(CLEAN_FLAC_REMOVE_ID3V1)
except IOError:
#file isn't 128 bytes long
pass
#fix empty MD5SUM
if (self.__md5__ == chr(0) * 16):
from .text import CLEAN_FLAC_POPULATE_MD5
fixes_performed.append(CLEAN_FLAC_POPULATE_MD5)
metadata = self.get_metadata()
if (metadata is None):
return
#fix missing WAVEFORMATEXTENSIBLE_CHANNEL_MASK
if ((self.channels() > 2) or (self.bits_per_sample() > 16)):
from .text import CLEAN_FLAC_ADD_CHANNELMASK
try:
if (u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK" not in
metadata.get_block(
Flac_VORBISCOMMENT.BLOCK_ID).keys()):
fixes_performed.append(CLEAN_FLAC_ADD_CHANNELMASK)
except IndexError:
fixes_performed.append(CLEAN_FLAC_ADD_CHANNELMASK)
#fix an invalid SEEKTABLE, if present
try:
if (not seektable_valid(
metadata.get_block(Flac_SEEKTABLE.BLOCK_ID),
stream_offset + 4 + self.metadata_length(),
input_f)):
from .text import CLEAN_FLAC_FIX_SEEKTABLE
fixes_performed.append(CLEAN_FLAC_FIX_SEEKTABLE)
except IndexError:
pass
#fix any remaining metadata problems
metadata.clean(fixes_performed)
finally:
input_f.close()
else:
#perform complete fix
input_f = open(self.filename, "rb")
try:
#remove ID3 tags from before and after FLAC stream
stream_size = os.path.getsize(self.filename)
stream_offset = skip_id3v2_comment(input_f)
if (stream_offset > 0):
from .text import CLEAN_FLAC_REMOVE_ID3V2
fixes_performed.append(CLEAN_FLAC_REMOVE_ID3V2)
stream_size -= stream_offset
try:
input_f.seek(-128, 2)
if (input_f.read(3) == 'TAG'):
from .text import CLEAN_FLAC_REMOVE_ID3V1
fixes_performed.append(CLEAN_FLAC_REMOVE_ID3V1)
stream_size -= 128
except IOError:
#file isn't 128 bytes long
pass
output_f = open(output_filename, "wb")
try:
input_f.seek(stream_offset, 0)
while (stream_size > 0):
s = input_f.read(4096)
if (len(s) > stream_size):
s = s[0:stream_size]
output_f.write(s)
stream_size -= len(s)
finally:
output_f.close()
output_track = self.__class__(output_filename)
metadata = self.get_metadata()
if (metadata is not None):
#fix empty MD5SUM
if (self.__md5__ == chr(0) * 16):
from hashlib import md5
from . import transfer_framelist_data
md5sum = md5()
transfer_framelist_data(
self.to_pcm(),
md5sum.update,
signed=True,
big_endian=False)
metadata.get_block(
Flac_STREAMINFO.BLOCK_ID).md5sum = md5sum.digest()
from .text import CLEAN_FLAC_POPULATE_MD5
fixes_performed.append(CLEAN_FLAC_POPULATE_MD5)
#fix missing WAVEFORMATEXTENSIBLE_CHANNEL_MASK
if (((self.channels() > 2) or
(self.bits_per_sample() > 16))):
try:
vorbis_comment = metadata.get_block(
Flac_VORBISCOMMENT.BLOCK_ID)
except IndexError:
vorbis_comment = Flac_VORBISCOMMENT(
[], u"Python Audio Tools %s" % (VERSION))
if ((u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK" not in
vorbis_comment.keys())):
from .text import CLEAN_FLAC_ADD_CHANNELMASK
fixes_performed.append(CLEAN_FLAC_ADD_CHANNELMASK)
vorbis_comment[
u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"] = \
[u"0x%.4X" % (self.channel_mask())]
metadata.replace_blocks(
Flac_VORBISCOMMENT.BLOCK_ID,
[vorbis_comment])
#fix an invalid SEEKTABLE, if present
try:
if (not seektable_valid(
metadata.get_block(Flac_SEEKTABLE.BLOCK_ID),
stream_offset + 4 + self.metadata_length(),
input_f)):
from .text import CLEAN_FLAC_FIX_SEEKTABLE
fixes_performed.append(CLEAN_FLAC_FIX_SEEKTABLE)
metadata.replace_blocks(Flac_SEEKTABLE.BLOCK_ID,
[self.seektable()])
except IndexError:
pass
#fix remaining metadata problems
#which automatically shifts STREAMINFO to the right place
#(the message indicating the fix has already been output)
output_track.update_metadata(
metadata.clean(fixes_performed))
return output_track
finally:
input_f.close()
class FLAC_Data_Chunk:
def __init__(self, total_frames, pcmreader):
self.id = "data"
self.__total_frames__ = total_frames
self.__pcmreader__ = pcmreader
def __repr__(self):
return "FLAC_Data_Chunk()"
def size(self):
"""returns size of chunk in bytes
not including any spacer byte for odd-sized chunks"""
return (self.__total_frames__ *
self.__pcmreader__.channels *
(self.__pcmreader__.bits_per_sample / 8))
def verify(self):
"returns True"
return True
def write(self, f):
"""writes the entire chunk to the given output file object
returns size of entire chunk (including header and spacer)
in bytes"""
from struct import pack
from . import FRAMELIST_SIZE
f.write(self.id)
f.write(pack("<I", self.size()))
bytes_written = 8
signed = (self.__pcmreader__.bits_per_sample > 8)
s = self.__pcmreader__.read(FRAMELIST_SIZE)
while (len(s) > 0):
b = s.to_bytes(False, signed)
f.write(b)
bytes_written += len(b)
s = self.__pcmreader__.read(FRAMELIST_SIZE)
if (bytes_written % 2):
f.write(chr(0))
bytes_written += 1
return bytes_written
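# Size arithmetic used by FLAC_Data_Chunk.size() above, with assumed
# example values: a 16 bit stereo stream of 44100 PCM frames occupies
# 44100 * 2 channels * (16 / 8) bytes = 176400 bytes; write() appends
# a spacer byte only when the running byte count ends up odd.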
class FLAC_SSND_Chunk(FLAC_Data_Chunk):
def __init__(self, total_frames, pcmreader):
self.id = "SSND"
self.__total_frames__ = total_frames
self.__pcmreader__ = pcmreader
def __repr__(self):
return "FLAC_SSND_Chunk()"
def size(self):
"""returns size of chunk in bytes
not including any spacer byte for odd-sized chunks"""
return 8 + (self.__total_frames__ *
self.__pcmreader__.channels *
(self.__pcmreader__.bits_per_sample / 8))
def write(self, f):
"""writes the entire chunk to the given output file object
returns size of entire chunk (including header and spacer)
in bytes"""
from struct import pack
from . import FRAMELIST_SIZE
f.write(self.id)
f.write(pack(">I", self.size()))
bytes_written = 8
f.write(pack(">II", 0, 0))
bytes_written += 8
s = self.__pcmreader__.read(FRAMELIST_SIZE)
while (len(s) > 0):
b = s.to_bytes(True, True)
f.write(b)
bytes_written += len(b)
s = self.__pcmreader__.read(FRAMELIST_SIZE)
if (bytes_written % 2):
f.write(chr(0))
bytes_written += 1
return bytes_written
#######################
#Ogg FLAC
#######################
class OggFlacMetaData(FlacMetaData):
@classmethod
def converted(cls, metadata):
"""takes a MetaData object and returns an OggFlacMetaData object"""
if (metadata is None):
return None
elif (isinstance(metadata, FlacMetaData)):
return cls([block.copy() for block in metadata.block_list])
else:
return cls([Flac_VORBISCOMMENT.converted(metadata)] +
[Flac_PICTURE.converted(image)
for image in metadata.images()])
def __repr__(self):
return ("OggFlacMetaData(%s)" % (repr(self.block_list)))
@classmethod
def parse(cls, reader):
"""returns an OggFlacMetaData object from the given BitstreamReader
raises IOError or ValueError if an error occurs reading MetaData"""
from .ogg import read_ogg_packets
streaminfo = None
applications = []
seektable = None
vorbis_comment = None
cuesheet = None
pictures = []
packets = read_ogg_packets(reader)
streaminfo_packet = packets.next()
streaminfo_packet.set_endianness(0)
(packet_byte,
ogg_signature,
major_version,
minor_version,
header_packets,
flac_signature,
block_type,
block_length,
minimum_block_size,
maximum_block_size,
minimum_frame_size,
maximum_frame_size,
sample_rate,
channels,
bits_per_sample,
total_samples,
md5sum) = streaminfo_packet.parse(
"8u 4b 8u 8u 16u 4b 8u 24u 16u 16u 24u 24u 20u 3u 5u 36U 16b")
block_list = [Flac_STREAMINFO(minimum_block_size=minimum_block_size,
maximum_block_size=maximum_block_size,
minimum_frame_size=minimum_frame_size,
maximum_frame_size=maximum_frame_size,
sample_rate=sample_rate,
channels=channels + 1,
bits_per_sample=bits_per_sample + 1,
total_samples=total_samples,
md5sum=md5sum)]
for (i, packet) in zip(range(header_packets), packets):
packet.set_endianness(0)
(block_type, length) = packet.parse("1p 7u 24u")
if (block_type == 1): # PADDING
block_list.append(Flac_PADDING.parse(packet, length))
            elif (block_type == 2):  # APPLICATION
block_list.append(Flac_APPLICATION.parse(packet, length))
elif (block_type == 3): # SEEKTABLE
block_list.append(Flac_SEEKTABLE.parse(packet, length / 18))
elif (block_type == 4): # VORBIS_COMMENT
block_list.append(Flac_VORBISCOMMENT.parse(packet))
elif (block_type == 5): # CUESHEET
block_list.append(Flac_CUESHEET.parse(packet))
elif (block_type == 6): # PICTURE
block_list.append(Flac_PICTURE.parse(packet))
elif ((block_type >= 7) and (block_type <= 126)):
from .text import ERR_FLAC_RESERVED_BLOCK
raise ValueError(ERR_FLAC_RESERVED_BLOCK % (block_type))
elif (block_type == 127):
from .text import ERR_FLAC_INVALID_BLOCK
raise ValueError(ERR_FLAC_INVALID_BLOCK)
return cls(block_list)
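    # The first packet parsed above follows the Ogg FLAC mapping:
    # one 0x7F byte, the ASCII signature "FLAC", major/minor version
    # bytes, a 16 bit count of the header packets that follow, then a
    # conventional "fLaC" marker plus STREAMINFO block, which is why the
    # parse string ends with the same fields a native FLAC STREAMINFO
    # would carry.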
def build(self, oggwriter):
"""oggwriter is an OggStreamWriter-compatible object"""
from .bitstream import BitstreamRecorder
from .bitstream import format_size
from . import iter_first, iter_last
packet = BitstreamRecorder(0)
#build extended Ogg FLAC STREAMINFO block
#which will always occupy its own page
streaminfo = self.get_block(Flac_STREAMINFO.BLOCK_ID)
#all our non-STREAMINFO blocks that are small enough
#to fit in the output stream
valid_blocks = [b for b in self.blocks()
if ((b.BLOCK_ID != Flac_STREAMINFO.BLOCK_ID) and
(b.size() < (2 ** 24)))]
packet.build(
"8u 4b 8u 8u 16u 4b 8u 24u 16u 16u 24u 24u 20u 3u 5u 36U 16b",
(0x7F, "FLAC", 1, 0, len(valid_blocks), "fLaC", 0,
format_size("16u 16u 24u 24u 20u 3u 5u 36U 16b") / 8,
streaminfo.minimum_block_size,
streaminfo.maximum_block_size,
streaminfo.minimum_frame_size,
streaminfo.maximum_frame_size,
streaminfo.sample_rate,
streaminfo.channels - 1,
streaminfo.bits_per_sample - 1,
streaminfo.total_samples,
streaminfo.md5sum))
oggwriter.write_page(0, [packet.data()], 0, 1, 0)
#FIXME - adjust non-STREAMINFO blocks to use fewer pages
#pack remaining metadata blocks into as few pages as possible
for (last_block, block) in iter_last(iter(valid_blocks)):
packet.reset()
if (not last_block):
packet.build("1u 7u 24u", (0, block.BLOCK_ID, block.size()))
else:
packet.build("1u 7u 24u", (1, block.BLOCK_ID, block.size()))
block.build(packet)
for (first_page, page_segments) in iter_first(
oggwriter.segments_to_pages(
oggwriter.packet_to_segments(packet.data()))):
oggwriter.write_page(0 if first_page else -1,
page_segments,
0 if first_page else 1, 0, 0)
class __Counter__:
def __init__(self):
self.value = 0
def count_byte(self, i):
self.value += 1
def __int__(self):
return self.value
class OggFlacAudio(FlacAudio):
"""a Free Lossless Audio Codec file inside an Ogg container"""
from .text import (COMP_FLAC_0, COMP_FLAC_8)
SUFFIX = "oga"
NAME = SUFFIX
DESCRIPTION = u"Ogg FLAC"
DEFAULT_COMPRESSION = "8"
COMPRESSION_MODES = tuple(map(str, range(0, 9)))
COMPRESSION_DESCRIPTIONS = {"0": COMP_FLAC_0,
"8": COMP_FLAC_8}
BINARIES = ("flac",)
METADATA_CLASS = OggFlacMetaData
def __init__(self, filename):
"""filename is a plain string"""
AudioFile.__init__(self, filename)
self.__samplerate__ = 0
self.__channels__ = 0
self.__bitspersample__ = 0
self.__total_frames__ = 0
try:
self.__read_streaminfo__()
except IOError, msg:
raise InvalidFLAC(str(msg))
def bits_per_sample(self):
"""returns an integer number of bits-per-sample this track contains"""
return self.__bitspersample__
def channels(self):
"""returns an integer number of channels this track contains"""
return self.__channels__
def total_frames(self):
"""returns the total PCM frames of the track as an integer"""
return self.__total_frames__
def sample_rate(self):
"""returns the rate of the track's audio as an integer number of Hz"""
return self.__samplerate__
def get_metadata(self):
"""returns a MetaData object, or None
        returns None if an error occurs while reading metadata
raises IOError if unable to read the file"""
f = open(self.filename, "rb")
try:
from .bitstream import BitstreamReader
try:
return OggFlacMetaData.parse(BitstreamReader(f, 1))
except ValueError:
return None
finally:
f.close()
def update_metadata(self, metadata):
"""takes this track's current MetaData object
as returned by get_metadata() and sets this track's metadata
with any fields updated in that object
raises IOError if unable to write the file
"""
if (metadata is None):
return None
if (not isinstance(metadata, OggFlacMetaData)):
from .text import ERR_FOREIGN_METADATA
raise ValueError(ERR_FOREIGN_METADATA)
#always overwrite Ogg FLAC with fresh metadata
#
#The trouble with Ogg FLAC padding is that Ogg header overhead
#requires a variable amount of overhead bytes per Ogg page
#which makes it very difficult to calculate how many
#bytes to allocate to the PADDING packet.
#We'd have to build a bunch of empty pages for padding
#then go back and fill-in the initial padding page's length
#field before re-checksumming it.
import tempfile
from .bitstream import BitstreamWriter
from .bitstream import BitstreamRecorder
from .bitstream import BitstreamAccumulator
from .bitstream import BitstreamReader
from .ogg import OggStreamReader, OggStreamWriter
from . import transfer_data
new_file = tempfile.TemporaryFile()
try:
original_file = file(self.filename, 'rb')
try:
original_reader = BitstreamReader(original_file, 1)
original_ogg = OggStreamReader(original_reader)
new_writer = BitstreamWriter(new_file, 1)
new_ogg = OggStreamWriter(new_writer,
self.__serial_number__)
#write our new comment blocks to the new file
metadata.build(new_ogg)
#skip the metadata packets in the original file
OggFlacMetaData.parse(original_reader)
#transfer the remaining pages from the original file
#(which are re-sequenced and re-checksummed automatically)
for (granule_position,
segments,
continuation,
first_page,
last_page) in original_ogg.pages():
new_ogg.write_page(granule_position,
segments,
continuation,
first_page,
last_page)
finally:
original_file.close()
#copy temporary file data over our original file
original_file = file(self.filename, "wb")
try:
new_file.seek(0, 0)
transfer_data(new_file.read, original_file.write)
new_file.close()
finally:
original_file.close()
finally:
new_file.close()
def metadata_length(self):
"""returns the length of all Ogg FLAC metadata blocks as an integer
this includes all Ogg page headers"""
from .bitstream import BitstreamReader
f = file(self.filename, 'rb')
try:
byte_count = __Counter__()
ogg_stream = BitstreamReader(f, 1)
ogg_stream.add_callback(byte_count.count_byte)
OggFlacMetaData.parse(ogg_stream)
return int(byte_count)
finally:
f.close()
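    # Rather than summing block sizes itself, the method above installs
    # __Counter__.count_byte as a read callback, so every byte consumed
    # while parsing the metadata (Ogg page headers included) increments
    # the counter; int(byte_count) is then the full on-disk length.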
def __read_streaminfo__(self):
from .bitstream import BitstreamReader
f = open(self.filename, "rb")
try:
ogg_reader = BitstreamReader(f, 1)
(magic_number,
version,
header_type,
granule_position,
self.__serial_number__,
page_sequence_number,
checksum,
segment_count) = ogg_reader.parse("4b 8u 8u 64S 32u 32u 32u 8u")
if (magic_number != 'OggS'):
from .text import ERR_OGG_INVALID_MAGIC_NUMBER
raise InvalidFLAC(ERR_OGG_INVALID_MAGIC_NUMBER)
if (version != 0):
from .text import ERR_OGG_INVALID_VERSION
raise InvalidFLAC(ERR_OGG_INVALID_VERSION)
segment_length = ogg_reader.read(8)
ogg_reader.set_endianness(0)
(packet_byte,
ogg_signature,
major_version,
minor_version,
self.__header_packets__,
flac_signature,
block_type,
block_length,
minimum_block_size,
maximum_block_size,
minimum_frame_size,
maximum_frame_size,
self.__samplerate__,
self.__channels__,
self.__bitspersample__,
self.__total_frames__,
self.__md5__) = ogg_reader.parse(
"8u 4b 8u 8u 16u 4b 8u 24u 16u 16u 24u 24u 20u 3u 5u 36U 16b")
if (packet_byte != 0x7F):
from .text import ERR_OGGFLAC_INVALID_PACKET_BYTE
raise InvalidFLAC(ERR_OGGFLAC_INVALID_PACKET_BYTE)
if (ogg_signature != 'FLAC'):
from .text import ERR_OGGFLAC_INVALID_OGG_SIGNATURE
raise InvalidFLAC(ERR_OGGFLAC_INVALID_OGG_SIGNATURE)
if (major_version != 1):
from .text import ERR_OGGFLAC_INVALID_MAJOR_VERSION
raise InvalidFLAC(ERR_OGGFLAC_INVALID_MAJOR_VERSION)
if (minor_version != 0):
from .text import ERR_OGGFLAC_INVALID_MINOR_VERSION
raise InvalidFLAC(ERR_OGGFLAC_INVALID_MINOR_VERSION)
if (flac_signature != 'fLaC'):
from .text import ERR_OGGFLAC_VALID_FLAC_SIGNATURE
raise InvalidFLAC(ERR_OGGFLAC_VALID_FLAC_SIGNATURE)
self.__channels__ += 1
self.__bitspersample__ += 1
finally:
f.close()
def to_pcm(self):
"""returns a PCMReader object containing the track's PCM data"""
from . import decoders
from . import PCMReaderError
try:
return decoders.OggFlacDecoder(self.filename,
self.channel_mask())
except (IOError, ValueError), msg:
#The only time this is likely to occur is
#if the Ogg FLAC is modified between when OggFlacAudio
#is initialized and when to_pcm() is called.
return PCMReaderError(error_message=str(msg),
sample_rate=self.sample_rate(),
channels=self.channels(),
channel_mask=int(self.channel_mask()),
bits_per_sample=self.bits_per_sample())
@classmethod
def from_pcm(cls, filename, pcmreader, compression=None):
"""encodes a new file from PCM data
takes a filename string, PCMReader object
and optional compression level string
encodes a new audio file from pcmreader's data
at the given filename with the specified compression level
and returns a new OggFlacAudio object"""
from . import BIN
from . import transfer_framelist_data
from . import ignore_sigint
from . import EncodingError
from . import DecodingError
from . import UnsupportedChannelCount
from . import __default_quality__
import subprocess
import os
SUBSTREAM_SAMPLE_RATES = frozenset([8000, 16000, 22050, 24000, 32000,
44100, 48000, 96000])
SUBSTREAM_BITS = frozenset([8, 12, 16, 20, 24])
if ((compression is None) or (compression not in
cls.COMPRESSION_MODES)):
compression = __default_quality__(cls.NAME)
if (((pcmreader.sample_rate in SUBSTREAM_SAMPLE_RATES) and
(pcmreader.bits_per_sample in SUBSTREAM_BITS))):
lax = []
else:
lax = ["--lax"]
if (pcmreader.channels > 8):
raise UnsupportedChannelCount(filename, pcmreader.channels)
if (int(pcmreader.channel_mask) == 0):
if (pcmreader.channels <= 6):
channel_mask = {1: 0x0004,
2: 0x0003,
3: 0x0007,
4: 0x0033,
5: 0x0037,
6: 0x003F}[pcmreader.channels]
else:
channel_mask = 0
elif (int(pcmreader.channel_mask) not in
            (0x0001,  # 1ch - front left (mono)
             0x0004,  # 1ch - front center (mono)
0x0003, # 2ch - left, right
0x0007, # 3ch - left, right, center
0x0033, # 4ch - left, right, back left, back right
0x0603, # 4ch - left, right, side left, side right
0x0037, # 5ch - L, R, C, back left, back right
0x0607, # 5ch - L, R, C, side left, side right
0x003F, # 6ch - L, R, C, LFE, back left, back right
0x060F)): # 6ch - L, R, C, LFE, side left, side right
from . import UnsupportedChannelMask
raise UnsupportedChannelMask(filename,
int(pcmreader.channel_mask))
else:
channel_mask = int(pcmreader.channel_mask)
devnull = file(os.devnull, 'ab')
sub = subprocess.Popen([BIN['flac']] + lax +
["-s", "-f", "-%s" % (compression),
"-V", "--ogg",
"--endian=little",
"--channels=%d" % (pcmreader.channels),
"--bps=%d" % (pcmreader.bits_per_sample),
"--sample-rate=%d" % (pcmreader.sample_rate),
"--sign=signed",
"--force-raw-format",
"-o", filename, "-"],
stdin=subprocess.PIPE,
stdout=devnull,
stderr=devnull,
                               preexec_fn=ignore_sigint)
try:
transfer_framelist_data(pcmreader, sub.stdin.write)
except (ValueError, IOError), err:
sub.stdin.close()
sub.wait()
cls.__unlink__(filename)
raise EncodingError(str(err))
except Exception, err:
sub.stdin.close()
sub.wait()
cls.__unlink__(filename)
raise err
try:
pcmreader.close()
except DecodingError, err:
raise EncodingError(err.error_message)
sub.stdin.close()
devnull.close()
if (sub.wait() == 0):
oggflac = OggFlacAudio(filename)
if ((((pcmreader.channels > 2) or
(pcmreader.bits_per_sample > 16)) and
(channel_mask != 0))):
metadata = oggflac.get_metadata()
vorbis = metadata.get_block(Flac_VORBISCOMMENT.BLOCK_ID)
vorbis[u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"] = [
u"0x%.4X" % (channel_mask)]
oggflac.update_metadata(metadata)
return oggflac
else:
#FIXME
raise EncodingError(u"error encoding file with flac")
def sub_pcm_tracks(self):
"""yields a PCMReader object per cuesheet track
this currently does nothing since the FLAC reference
decoder has limited support for Ogg FLAC
"""
return iter([])
def verify(self, progress=None):
"""verifies the current file for correctness
returns True if the file is okay
raises an InvalidFile with an error message if there is
some problem with the file"""
from .verify import ogg as verify_ogg_stream
#Ogg stream verification is likely to be so fast
#that individual calls to progress() are
#a waste of time.
if (progress is not None):
progress(0, 1)
try:
f = open(self.filename, 'rb')
except IOError, err:
raise InvalidFLAC(str(err))
try:
try:
result = verify_ogg_stream(f)
if (progress is not None):
progress(1, 1)
return result is None
except (IOError, ValueError), err:
raise InvalidFLAC(str(err))
finally:
f.close()
|
R-a-dio/python-audio-tools
|
audiotools/flac.py
|
Python
|
gpl-2.0
| 125,969
|
[
"Brian"
] |
3b6b2f14fe455e868879b6c4e10b47b5d50cddb8bc02c8fe621378eb8d75f71f
|
import sys
import os
import glob
import difflib
import gzip
import contextlib
import inspect
import subprocess
import tempfile
import pysam
WORKDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"pysam_test_work"))
BAM_DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"pysam_data"))
TABIX_DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"tabix_data"))
CBCF_DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"cbcf_data"))
LINKDIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "linker_tests"))
TESTS_TEMPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp"))
IS_PYTHON3 = sys.version_info[0] >= 3
if IS_PYTHON3:
from itertools import zip_longest
from urllib.request import urlopen
else:
from itertools import izip as zip_longest
from urllib2 import urlopen
if IS_PYTHON3:
def force_str(s):
try:
return s.decode('ascii')
except AttributeError:
return s
def force_bytes(s):
try:
return s.encode('ascii')
except AttributeError:
return s
else:
def force_str(s):
return s
def force_bytes(s):
return s
def openfile(fn):
if fn.endswith(".gz"):
try:
return gzip.open(fn, "rt", encoding="utf-8")
except TypeError:
return gzip.open(fn, "r")
else:
return open(fn)
def checkBinaryEqual(filename1, filename2):
'''return true if the two files are binary equal.
'''
if os.path.getsize(filename1) != os.path.getsize(filename2):
return False
infile1 = open(filename1, "rb")
infile2 = open(filename2, "rb")
def chariter(infile):
        while True:
c = infile.read(1)
if c == b"":
break
yield c
found = False
for c1, c2 in zip_longest(chariter(infile1), chariter(infile2)):
if c1 != c2:
break
else:
found = True
infile1.close()
infile2.close()
return found
def checkGZBinaryEqual(filename1, filename2):
'''return true if the decompressed contents of the two files
are binary equal.
'''
with gzip.open(filename1, "rb") as infile1:
d1 = infile1.read()
with gzip.open(filename2, "rb") as infile2:
d2 = infile2.read()
    return d1 == d2
def check_samtools_view_equal(
filename1, filename2,
without_header=False):
'''return true if the two files are equal in their
content through samtools view.
'''
# strip MD and NM tags, as not preserved in CRAM files
args = ["-x", "MD", "-x", "NM"]
if not without_header:
args.append("-h")
lines1 = pysam.samtools.view(*(args + [filename1]))
lines2 = pysam.samtools.view(*(args + [filename2]))
if len(lines1) != len(lines2):
return False
if lines1 != lines2:
# line by line comparison
# sort each line, as tags get rearranged between
# BAM/CRAM
for n, pair in enumerate(zip(lines1, lines2)):
l1, l2 = pair
l1 = sorted(l1[:-1].split("\t"))
l2 = sorted(l2[:-1].split("\t"))
if l1 != l2:
print("mismatch in line %i" % n)
print(l1)
print(l2)
return False
else:
return False
return True
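# Note on the fallback path above: equal line counts with unequal text
# trigger a per-line recheck because optional tags may be reordered
# between BAM and CRAM, e.g. "AS:i:37\tXS:i:25" versus
# "XS:i:25\tAS:i:37" (made-up values) describe the same read, so each
# line's tab-separated fields are sorted before comparison.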
def check_url(url):
'''return True if URL is available.
A URL might not be available if it is the wrong URL
or there is no connection to the URL.
'''
try:
urlopen(url, timeout=1)
return True
    except Exception:
return False
def checkFieldEqual(cls, read1, read2, exclude=[]):
'''check if two reads are equal by comparing each field.'''
# add the . for refactoring purposes.
for x in (".query_name",
".query_sequence",
".flag",
".reference_id",
".reference_start",
".mapping_quality",
".cigartuples",
".next_reference_id",
".next_reference_start",
".template_length",
".query_length",
".query_qualities",
".bin",
".is_paired", ".is_proper_pair",
".is_unmapped", ".mate_is_unmapped",
".is_reverse", ".mate_is_reverse",
".is_read1", ".is_read2",
".is_secondary", ".is_qcfail",
".is_duplicate"):
n = x[1:]
if n in exclude:
continue
cls.assertEqual(getattr(read1, n), getattr(read2, n),
"attribute mismatch for %s: %s != %s" %
(n, getattr(read1, n), getattr(read2, n)))
def check_lines_equal(cls, a, b, sort=False, filter_f=None, msg=None):
"""check if contents of two files are equal comparing line-wise.
sort: bool
sort contents of both files before comparing.
filter_f:
        remove lines from both a and b where the expression is True
"""
with openfile(a) as inf:
aa = inf.readlines()
with openfile(b) as inf:
bb = inf.readlines()
if filter_f is not None:
aa = [x for x in aa if not filter_f(x)]
bb = [x for x in bb if not filter_f(x)]
if sort:
cls.assertEqual(sorted(aa), sorted(bb), msg)
else:
cls.assertEqual(aa, bb, msg)
def get_temp_filename(suffix=""):
caller_name = inspect.getouterframes(inspect.currentframe(), 2)[1][3]
try:
os.makedirs(TESTS_TEMPDIR)
except OSError:
pass
f = tempfile.NamedTemporaryFile(
prefix="pysamtests_tmp_{}_".format(caller_name),
suffix=suffix,
delete=False,
dir=TESTS_TEMPDIR)
f.close()
return f.name
@contextlib.contextmanager
def get_temp_context(suffix="", keep=False):
caller_name = inspect.getouterframes(inspect.currentframe(), 3)[1][3]
try:
os.makedirs(TESTS_TEMPDIR)
except OSError:
pass
f = tempfile.NamedTemporaryFile(
prefix="pysamtests_tmp_{}_".format(caller_name),
suffix=suffix,
delete=False,
dir=TESTS_TEMPDIR)
f.close()
yield f.name
if not keep:
# clear up any indices as well
for f in glob.glob(f.name + "*"):
os.unlink(f)
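# Usage sketch for the context manager above (a hypothetical test body;
# "input.bam" is a placeholder):
#
#     with get_temp_context(suffix=".bam") as fn:
#         pysam.sort("-o", fn, "input.bam")
#         # ... assertions against fn ...
#
# On exit the file and any sibling index files matching fn + "*" are
# removed unless keep=True was passed.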
def make_data_files(directory):
what = None
try:
if not os.path.exists(os.path.join(directory, "all.stamp")):
subprocess.check_output(["make", "-C", directory], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
what = "Making test data in '%s' failed:\n%s" % (directory, force_str(e.output))
if what is not None:
raise RuntimeError(what)
def load_and_convert(filename, encode=True):
'''load data from filename and convert all fields to string.
Filename can be either plain or compressed (ending in .gz).
'''
data = []
if filename.endswith(".gz"):
with gzip.open(filename) as inf:
for line in inf:
line = line.decode("ascii")
if line.startswith("#"):
continue
d = line.strip().split("\t")
data.append(d)
else:
with open(filename) as f:
for line in f:
if line.startswith("#"):
continue
d = line.strip().split("\t")
data.append(d)
return data
def flatten_nested_list(l):
return [i for ll in l for i in ll]
|
pysam-developers/pysam
|
tests/TestUtils.py
|
Python
|
mit
| 7,792
|
[
"pysam"
] |
70a9bce026d6c67cdc66a6851cdbbc1be70e0bfc62c4c39edf6cd4d80d2be825
|
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security import Properties
from DIRAC.AccountingSystem.private.Policies.FilterExecutor import FilterExecutor
class JobPolicy:
def __init__( self ):
self.__executor = FilterExecutor()
self.__executor.addGlobalFilter( self.__checkConditions )
def getListingConditions( self, credDict ):
    #Send all data, just restrict in the end
    #(this early return intentionally makes the per-property
    # filtering below unreachable)
    return {}
condDict = {}
userProps = credDict[ 'properties' ]
if Properties.JOB_ADMINISTRATOR in userProps:
return condDict
elif Properties.JOB_MONITOR in userProps:
return condDict
elif Properties.JOB_SHARING in userProps:
condDict[ 'UserGroup' ] = [ credDict[ 'group' ] ]
elif Properties.NORMAL_USER in userProps:
condDict[ 'User' ] = [ credDict[ 'username' ] ]
return condDict
def checkRequest( self, iD, credDict, condDict, groupingList ):
return self.__executor.applyFilters( iD, credDict, condDict, groupingList )
def __checkConditions( self, credDict, condDict, groupingField ):
userProps = credDict[ 'properties' ]
if Properties.JOB_ADMINISTRATOR in userProps:
return S_OK()
elif Properties.JOB_MONITOR in userProps:
return S_OK()
elif Properties.JOB_SHARING in userProps:
if 'User' in condDict:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'User' == groupingField:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'UserGroup' in condDict:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'UserGroup' == groupingField:
condDict[ 'UserGroup' ] = credDict[ 'group' ]
elif Properties.NORMAL_USER in userProps:
if 'User' in condDict:
condDict[ 'User' ] = credDict[ 'username' ]
if 'User' == groupingField:
condDict[ 'User' ] = credDict[ 'username' ]
if 'UserGroup' in condDict:
condDict[ 'User' ] = credDict[ 'username' ]
condDict[ 'UserGroup' ] = credDict[ 'group' ]
if 'UserGroup' == groupingField:
condDict[ 'User' ] = credDict[ 'username' ]
condDict[ 'UserGroup' ] = credDict[ 'group' ]
else:
if 'User' in condDict:
del( condDict[ 'User' ] )
if 'UserGroup' in condDict:
del( condDict[ 'UserGroup' ] )
if 'User' == groupingField:
return S_ERROR( "You can't group plots by users! Bad boy!" )
if 'UserGroup' == groupingField:
return S_ERROR( "You can't group plots by user groups! Bad boy!" )
return S_OK()
def filterListingValues( self, credDict, dataDict ):
userProps = credDict[ 'properties' ]
if Properties.JOB_ADMINISTRATOR in userProps:
return S_OK( dataDict )
elif Properties.JOB_MONITOR in userProps:
return S_OK( dataDict )
elif Properties.JOB_SHARING in userProps:
dataDict[ 'User' ] = [ credDict[ 'username' ] ]
dataDict[ 'UserGroup' ] = [ credDict[ 'group' ] ]
return S_OK( dataDict )
elif Properties.NORMAL_USER in userProps:
dataDict[ 'User' ] = [ credDict[ 'username' ] ]
dataDict[ 'UserGroup' ] = [ credDict[ 'group' ] ]
return S_OK( dataDict )
dataDict[ 'User' ] = []
dataDict[ 'UserGroup' ] = []
return S_OK( dataDict )
|
Sbalbp/DIRAC
|
AccountingSystem/private/Policies/JobPolicy.py
|
Python
|
gpl-3.0
| 3,221
|
[
"DIRAC"
] |
042091419f3eb6a8439f46c8401332d266736204a9ada9b82d8de17df08dcc52
|
########################################################################
# File : dirac-proxy-init.py
# Author : Adrian Casajus
########################################################################
from __future__ import print_function
import sys
import getpass
import DIRAC
from DIRAC.Core.Base import Script
__RCSID__ = "$Id$"
class CLIParams(object):
proxyLifeTime = 2592000
diracGroup = False
certLoc = False
keyLoc = False
proxyLoc = False
onTheFly = False
stdinPasswd = False
rfcIfPossible = False
userPasswd = ""
def __str__(self):
data = []
for k in ('proxyLifeTime', 'diracGroup', 'certLoc', 'keyLoc', 'proxyLoc',
'onTheFly', 'stdinPasswd', 'userPasswd'):
if k == 'userPasswd':
data.append("userPasswd = *****")
else:
data.append("%s=%s" % (k, getattr(self, k)))
msg = "<UploadCLIParams %s>" % " ".join(data)
return msg
def setProxyLifeTime(self, arg):
try:
fields = [f.strip() for f in arg.split(":")]
self.proxyLifeTime = int(fields[0]) * 3600 + int(fields[1]) * 60
except ValueError:
print("Can't parse %s time! Is it a HH:MM?" % arg)
return DIRAC.S_ERROR("Can't parse time argument")
return DIRAC.S_OK()
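# Worked example: setProxyLifeTime("720:30") stores
# 720 * 3600 + 30 * 60 = 2593800 seconds (roughly 30 days).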
def setProxyRemainingSecs(self, arg):
self.proxyLifeTime = int(arg)
return DIRAC.S_OK()
def getProxyLifeTime(self):
hours = self.proxyLifeTime // 3600
mins = self.proxyLifeTime // 60 - hours * 60
return "%s:%s" % (hours, mins)
def getProxyRemainingSecs(self):
return self.proxyLifeTime
def setDIRACGroup(self, arg):
self.diracGroup = arg
return DIRAC.S_OK()
def getDIRACGroup(self):
return self.diracGroup
def setCertLocation(self, arg):
self.certLoc = arg
return DIRAC.S_OK()
def setKeyLocation(self, arg):
self.keyLoc = arg
return DIRAC.S_OK()
def setProxyLocation(self, arg):
self.proxyLoc = arg
return DIRAC.S_OK()
def setOnTheFly(self, arg):
self.onTheFly = True
return DIRAC.S_OK()
def setStdinPasswd(self, arg):
self.stdinPasswd = True
return DIRAC.S_OK()
def showVersion(self, arg):
print("Version:")
print(" ", __RCSID__)
sys.exit(0)
return DIRAC.S_OK()
def registerCLISwitches(self):
Script.registerSwitch("v:", "valid=", "Valid HH:MM for the proxy. By default is one month", self.setProxyLifeTime)
Script.registerSwitch("g:", "group=", "DIRAC Group to embed in the proxy", self.setDIRACGroup)
Script.registerSwitch("C:", "Cert=", "File to use as user certificate", self.setCertLocation)
Script.registerSwitch("K:", "Key=", "File to use as user key", self.setKeyLocation)
Script.registerSwitch("P:", "Proxy=", "File to use as proxy", self.setProxyLocation)
Script.registerSwitch("f", "onthefly", "Generate a proxy on the fly", self.setOnTheFly)
Script.registerSwitch("p", "pwstdin", "Get passwd from stdin", self.setStdinPasswd)
Script.registerSwitch("i", "version", "Print version", self.showVersion)
Script.addDefaultOptionValue("LogLevel", "always")
from DIRAC import S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Core.Security import Locations
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
def uploadProxy(params):
DIRAC.gLogger.info("Loading user proxy")
proxyLoc = params.proxyLoc
if not proxyLoc:
proxyLoc = Locations.getDefaultProxyLocation()
if not proxyLoc:
return S_ERROR("Can't find any proxy")
if params.onTheFly:
DIRAC.gLogger.info("Uploading proxy on-the-fly")
certLoc = params.certLoc
keyLoc = params.keyLoc
if not certLoc or not keyLoc:
cakLoc = Locations.getCertificateAndKeyLocation()
if not cakLoc:
return S_ERROR("Can't find user certificate and key")
if not certLoc:
certLoc = cakLoc[0]
if not keyLoc:
keyLoc = cakLoc[1]
DIRAC.gLogger.info("Cert file %s" % certLoc)
DIRAC.gLogger.info("Key file %s" % keyLoc)
testChain = X509Chain()
retVal = testChain.loadKeyFromFile(keyLoc, password=params.userPasswd)
if not retVal['OK']:
passwdPrompt = "Enter Certificate password:"
if params.stdinPasswd:
userPasswd = sys.stdin.readline().strip("\n")
else:
userPasswd = getpass.getpass(passwdPrompt)
params.userPasswd = userPasswd
DIRAC.gLogger.info("Loading cert and key")
chain = X509Chain()
# Load user cert and key
retVal = chain.loadChainFromFile(certLoc)
if not retVal['OK']:
return S_ERROR("Can't load %s" % certLoc)
retVal = chain.loadKeyFromFile(keyLoc, password=params.userPasswd)
if not retVal['OK']:
return S_ERROR("Can't load %s" % keyLoc)
DIRAC.gLogger.info("User credentials loaded")
diracGroup = params.diracGroup
if not diracGroup:
result = chain.getCredentials()
if not result['OK']:
return result
if 'group' not in result['Value']:
return S_ERROR('Can not get Group from existing credentials')
diracGroup = result['Value']['group']
restrictLifeTime = params.proxyLifeTime
else:
proxyChain = X509Chain()
retVal = proxyChain.loadProxyFromFile(proxyLoc)
if not retVal['OK']:
return S_ERROR("Can't load proxy file %s: %s" % (params.proxyLoc, retVal['Message']))
chain = proxyChain
diracGroup = params.diracGroup
if params.diracGroup:
# Check that there is no conflict with the already present DIRAC group
result = chain.getDIRACGroup(ignoreDefault=True)
if result['OK'] and result['Value'] and result['Value'] == params.diracGroup:
# No need to embed a new DIRAC group
diracGroup = False
restrictLifeTime = 0
DIRAC.gLogger.info(" Uploading...")
return gProxyManager.uploadProxy(
chain,
diracGroup,
restrictLifeTime=restrictLifeTime,
rfcIfPossible=params.rfcIfPossible)
|
fstagni/DIRAC
|
FrameworkSystem/Client/ProxyUpload.py
|
Python
|
gpl-3.0
| 5,948
|
[
"DIRAC"
] |
e885158efeeb62defc27b92c06b6ab82d69e62be8077bfc71fef39f498a730b9
|
import math
import string
import sys
import struct
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as pyplot
import matplotlib.colors as pycolors
import matplotlib.cm as cm
import matplotlib.patches as patches
import numpy as np
#import cPickle
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
import scipy.optimize  # needed for scipy.optimize.minimize in compute_asym
import glob
import os
import make_color_image
#import make_fake_wht
import gzip
import tarfile
import shutil
import congrid
import astropy.io.ascii as ascii
import warnings
import subprocess
import photutils
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import *
import astropy.io.fits as pyfits
import datetime
import medianstats_bootstrap as msbs
#want my custom skimage latest version not ssbx
sys.path = ['/Users/gsnyder/ssbvirt/ssbx-osx/lib/python2.7/site-packages']+sys.path
import skimage.transform
import copy
import region_grow
def arcsec_per_radian():
return 3600.0*(180.0/math.pi)
def mad_value(array_input):
return msbs.MAD(array_input)
def median_value(array_input):
return np.median(array_input)
def mean_value(array_input):
return np.mean(array_input)
def std_value(array_input):
return np.std(array_input)
def var_value(array_input):
return np.var(array_input)
def madvar_value(array_input):
return msbs.MAD(array_input)**2
def central_moments(image, i_list, j_list, xc=None, yc=None):
xi = np.float32(np.arange(image.shape[0]))+0.5
pxpos,pypos = np.meshgrid(xi,xi)
mu_00 = np.sum(image)
if xc is None:
moment_array = (pxpos)*image
xc= np.sum(moment_array)/mu_00
if yc is None:
moment_array = (pypos)*image
yc=np.sum(moment_array)/mu_00
x_offsets = pxpos - xc
y_offsets = pypos - yc
mu_ij = []
eta_ij = []
for index,i in enumerate(i_list):
j=j_list[index]
moment_array = ((x_offsets)**i)*((y_offsets)**j)*image
mu_ij.append(np.sum(moment_array))
eta_ij.append( mu_ij[index]/(mu_00**(1.0 + float(i+j)/2.0)))
return np.asarray(mu_ij), np.asarray(eta_ij)
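# In standard image-moment notation, the function above computes the central
# moments mu_ij = sum_xy (x - xc)^i * (y - yc)^j * I(x,y) and the normalized
# (scale-invariant) moments eta_ij = mu_ij / mu_00^(1 + (i+j)/2), with the
# centroid (xc, yc) defaulting to the intensity-weighted mean position.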
class galdata:
def __init__(self):
self.description='Lotz Morphology Input/Output object'
self.hu_moments = [None]
self.fs93_moments = [None]
self.lotz_morph_status = 'Not Started'
self.petro_segmap = None
def run_lotz_morphs(self):
#check for essential inputs
correct_init = hasattr(self,'galaxy_image')
correct_init = correct_init and hasattr(self,'galaxy_segmap')
correct_init = correct_init and hasattr(self,'npix')
correct_init = correct_init and hasattr(self,'pixel_xpos')
correct_init = correct_init and hasattr(self,'pixel_ypos')
correct_init = correct_init and hasattr(self,'xcentroid')
correct_init = correct_init and hasattr(self,'ycentroid')
correct_init = correct_init and hasattr(self,'elongation')
correct_init = correct_init and hasattr(self,'pa_radians')
correct_init = correct_init and hasattr(self,'skysig')
correct_init = correct_init and hasattr(self,'pixelscale_arcsec')
correct_init = correct_init and hasattr(self,'psf_fwhm_arcsec')
correct_init = correct_init and hasattr(self,'magseg')
correct_init = correct_init and hasattr(self,'magseg_err')
correct_init = correct_init and hasattr(self,'rproj_arcsec')
correct_init = correct_init and hasattr(self,'abzp')
if correct_init:
#carry out calculations
self.rpetro_eta = 0.2
self.extent_rpetro = 1.5
self.lotz_morph_status = 'Initialized'
#check S/N per galaxy pixel inside aperture defined by segmap
self.snpix_init = self.sn_per_pixel(self.galaxy_image,self.galaxy_segmap)
self.morph_hdu.header['SNP_INIT']=(round(self.snpix_init,8),'Initial average S/N per pixel')
if self.snpix_init < 1.0:
self.lotz_morph_status = 'Error: too faint!'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
print(' Average S/N per pixel: {:7.3f}'.format(self.snpix_init))
return
else:
self.lotz_morph_status = 'OK signal-to-noise (0)'
#initial radius calculation, result in pixels
self.rp_circ_1,self.rp_circ_status_1,self.rp_circ_err_1 = self.rpetro_circ(xcenter=self.xcentroid, ycenter=self.ycentroid)
print(' Found circular Petrosian Radius (1): {:8.4f} {:8.4f} {:25s} {:8.4f} {:8.4f} '.format(self.rp_circ_1, self.rp_circ_err_1, self.rp_circ_status_1, self.xcentroid, self.ycentroid))
if self.rp_circ_status_1 != 'Positive R_pet':
self.lotz_morph_status = 'Error: Poor Measurement of circular r_p (1)'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
return
else:
self.lotz_morph_status = 'Computed circular r_p (1)'
#use fixed skybox to minimize computation inside minimized asymmetry function
bkg_dif = (self.skybox - self.rot_skybox)
self.a_bkg = np.sum(np.abs(bkg_dif))/np.sum(np.ones_like(self.skybox))
#initial asymmetry minimization using rpc1
self.asym1,self.xcen_a1,self.ycen_a1,self.asym1_message = self.compute_asym(xcenter=self.xcentroid,ycenter=self.ycentroid,extent=self.rp_circ_1*self.extent_rpetro,a_bkg=self.a_bkg)
print(' Found Asymmetry & Center (1) : {:8.4f} {:8.4f} {:8.4f} {:45s}'.format(self.asym1,self.xcen_a1,self.ycen_a1, self.asym1_message))
if self.asym1==99.0:
self.lotz_morph_status = 'Error: Poor Measurement of Asymmetry & Center (1)'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status+', message: '+self.asym1_message)
return
else:
self.lotz_morph_status = 'Computed Asymmetry & Center (1)'
#recompute rpc2
self.rp_circ_2,self.rp_circ_status_2,self.rp_circ_err_2 = self.rpetro_circ(xcenter=self.xcen_a1, ycenter=self.ycen_a1)
print(' Found circular Petrosian Radius (2): {:8.4f} {:8.4f} {:25s} '.format(self.rp_circ_2, self.rp_circ_err_2, self.rp_circ_status_2))
if self.rp_circ_status_2 != 'Positive R_pet':
self.lotz_morph_status = 'Error: Poor Measurement of circular r_p (2)'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
return
else:
self.lotz_morph_status = 'Computed circular r_p (2)'
#final asymmetry centering
self.asym2,self.xcen_a2,self.ycen_a2,self.asym2_message = self.compute_asym(xcenter=self.xcen_a1,ycenter=self.ycen_a1,extent=self.rp_circ_2*self.extent_rpetro,a_bkg=self.a_bkg)
asym2,self.ga2,self.ba2 = self.galaxy_asymmetry(np.asarray([self.xcen_a1,self.ycen_a1]),radius=self.rp_circ_2*self.extent_rpetro,a_bkg=self.a_bkg)
print(' Found Asymmetry & Center (2) : {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f} '.format(self.asym2,self.xcen_a2,self.ycen_a2, self.a_bkg, self.ga2, self.ba2, asym2))
if self.asym2==99.0:
self.lotz_morph_status = 'Error: Poor Measurement of Asymmetry & Center (2)'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status+', message: '+self.asym2_message)
return
else:
self.lotz_morph_status = 'Computed Asymmetry & Center (2)'
#compute rpe, once and only once
self.rp_ellip,self.rp_ellip_status,self.rp_ellip_err = self.rpetro_ellip(xcenter=self.xcen_a2, ycenter=self.ycen_a2)
print(' Found elliptical Petrosian Radius : {:8.4f} {:8.4f} {:25s} '.format(self.rp_ellip, self.rp_ellip_err, self.rp_ellip_status))
if self.rp_ellip_status != 'Positive R_pet':
self.lotz_morph_status = 'Error: Poor Measurement of elliptical r_p'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
return
else:
self.lotz_morph_status = 'Computed elliptical r_p'
self.morph_hdu.header['RPC']=(round(self.rp_circ_2,8),'Petrosian Circular Radius (pixels)')
self.morph_hdu.header['RPC_ERR']=(round(self.rp_circ_err_2,8),'Error in Petrosian Circular Radius (pixels)')
self.morph_hdu.header['RPE']=(round(self.rp_ellip,8),'Petrosian Elliptical Semi-Major Axis (pixels)')
self.morph_hdu.header['RPE_ERR']=(round(self.rp_ellip_err,8),'Error in Petrosian Elliptical Semi-Major Axis (pixels)')
self.morph_hdu.header['ELONG']=(round(self.elongation,8),'Elongation Used in Elliptical Calcs')
self.morph_hdu.header['ORIENT']=(round(self.pa_radians,8),'Orientation of ellipse in radians')
self.morph_hdu.header['AXC']=(round(self.xcen_a2,8),'center x value minimizing Asym (pixels)')
self.morph_hdu.header['AYC']=(round(self.ycen_a2,8),'center y value minimizing Asym (pixels)')
self.morph_hdu.header['ASYM']=(round(self.asym2,8), 'Value of Asymmetry at final Asym center')
#half light radii??
#solve for moment center and segmap with a 2-step iteration
#1. Assume asymmetry center, compute segmap
#2. compute centroid in this initial segmap
#3. Compute segmap around this centroid instead
#4. Re-compute centroid?
self.petro_segmap_init = self.petro_sma_segmap(self.xcen_a2,self.ycen_a2,self.rp_ellip)
if self.petro_segmap_init is None:
self.lotz_morph_status = 'Error: Poor RPA segmap (1) (probably negative avg flux)'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
return
else:
self.lotz_morph_status = 'Computed RPA segmap (1)'
self.rpaseg_galaxy_image_init = np.where(self.petro_segmap_init==10.0, self.galaxy_image, np.zeros_like(self.galaxy_image))
#find G-M20 center by minimizing 2nd moment
#I'm pretty sure this is just the image centroid given the appropriate segmap???
m00 = np.sum(self.rpaseg_galaxy_image_init)
moment_array = (self.pixel_xpos)*self.rpaseg_galaxy_image_init
self.mxc= np.sum(moment_array)/m00
moment_array = (self.pixel_ypos)*self.rpaseg_galaxy_image_init
self.myc= np.sum(moment_array)/m00
#this assumes that 2nd moment is minimized when center is the centroid
self.petro_segmap = self.petro_sma_segmap(self.mxc,self.myc,self.rp_ellip)
if self.petro_segmap is None:
self.lotz_morph_status = 'Error: Poor RPA segmap (2) (probably negative avg flux)'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
return
else:
self.lotz_morph_status = 'Computed RPA segmap (2)'
self.rpaseg_galaxy_image = np.where(self.petro_segmap==10.0, self.galaxy_image, np.zeros_like(self.galaxy_image))
m00 = np.sum(self.rpaseg_galaxy_image)
moment_array = (self.pixel_xpos)*self.rpaseg_galaxy_image
self.mxc= np.sum(moment_array)/m00
moment_array = (self.pixel_ypos)*self.rpaseg_galaxy_image
self.myc= np.sum(moment_array)/m00
self.morph_hdu.header['MXC']=(round(self.mxc,8),'Centroid of final RPA image')
self.morph_hdu.header['MYC']=(round(self.myc,8),'Centroid of final RPA image')
#check S/N per galaxy pixel inside aperture defined above
self.snpix = self.sn_per_pixel(self.galaxy_image,self.petro_segmap)
self.morph_hdu.header['SNP']=(round(self.snpix,8),'Final average S/N per pixel')
if self.snpix < 2.0:
self.lotz_morph_status = 'Error: too faint!'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
print(' Average S/N per pixel: {:7.3f}'.format(self.snpix))
return
else:
self.lotz_morph_status = 'OK signal-to-noise (2)'
#compute concentration--choose center?
self.cc,self.cc_err,self.r20,self.r20_err,self.r80,self.r80_err,self.cc_status = self.concentration(self.xcen_a2,self.ycen_a2,1.5*self.rp_circ_2)
if self.cc_status != 'Calculated C':
self.lotz_morph_status = 'Warning: Bad Measurement of C'
self.morph_hdu.header['CFLAG']=(1,'Bad C measurement')
self.cflag = 1
print(' Found bad concentration : '+self.lotz_morph_status + ' '+self.cc_status, self.cc, self.cc_err, self.r20,self.r20_err,self.r80,self.r80_err)
else:
self.lotz_morph_status = 'Computed C'
self.morph_hdu.header['CFLAG']=(0,'OK C measurement')
self.morph_hdu.header['CC']=(round(self.cc,8),'Concentration Parameter')
self.morph_hdu.header['CC_ERR']=(round(self.cc_err,8),'Statistical error on Concentration Parameter')
self.morph_hdu.header['CC_R20']=(round(self.r20,8),'R20 value (pixels)')
self.morph_hdu.header['CC_R20E']=(round(self.r20_err,8),'R20 error (pixels)')
self.morph_hdu.header['CC_R80']=(round(self.r80,8),'R80 value (pixels)')
self.morph_hdu.header['CC_R80E']=(round(self.r80_err,8),'R80 error (pixels)')
self.cflag = 0
print(' Found concentration : {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:25s} '.format(self.cc, self.cc_err,self.r20,self.r20_err,self.r80,self.r80_err, self.cc_status))
#use region from internal segmentation map algorithm
analyze_image = self.rpaseg_galaxy_image
#gini
self.gini = self.compute_gini(self.galaxy_image,self.petro_segmap)
if True:
print(' Found Gini : {:8.4f} '.format(self.gini))
self.lotz_morph_status = 'Computed G'
self.morph_hdu.header['GINI']=(round(self.gini,8),'Gini (as defined by Lotz et al. 2004)')
#m20
self.m20 = self.compute_m20(analyze_image)
if self.m20 is not None:
print(' Found M20 : {:8.4f} '.format(self.m20))
self.lotz_morph_status = 'Computed M20'
self.morph_hdu.header['M20']=(round(self.m20,8),'M20 (as defined by Lotz et al. 2004)')
else:
print(' Bad M20 (too faint or small?) : ', np.sum(analyze_image))
self.lotz_morph_status = 'Computed M20'
label,num=scipy.ndimage.measurements.label(self.petro_segmap)
if num != 1:
print(' Non-contiguous or non-existent segmap! {:4d} '.format(num))
self.lotz_morph_status = 'Error: bad segmap'
#half-light radii
self.rhalf_circ,self.rhalf_circ_err,self.rhalf_circ_status = self.fluxrad_circ(0.5,self.xcen_a2,self.ycen_a2,1.5*self.rp_circ_2)
self.rhalf_ellip,self.rhalf_ellip_err,self.rhalf_ellip_status = self.fluxrad_ellip(0.5,self.xcen_a2,self.ycen_a2,1.5*self.rp_ellip)
if self.rhalf_circ is not None and self.rhalf_ellip is not None:
print(' Found R_half : {:8.4f} {:8.4f} '.format(self.rhalf_circ,self.rhalf_circ_err))
print(' Found R_half_e : {:8.4f} {:8.4f} '.format(self.rhalf_ellip,self.rhalf_ellip_err))
self.morph_hdu.header['R5C']=(round(self.rhalf_circ,8),'Circular half-light radius (pix)')
self.morph_hdu.header['R5E']=(round(self.rhalf_ellip,8),'Elliptical half-light radius (pix)')
self.morph_hdu.header['ER5C']=(round(self.rhalf_circ_err,8),'Error on circular half-light radius')
self.morph_hdu.header['ER5E']=(round(self.rhalf_ellip_err,8),'Error on elliptical half-light radius')
#MID
#First, follow Freeman by assuming the galaxy lies at the very center of the image
#Second, use Lotz petro segmap to compute everything a 2nd time, for comparison purposes
#this alleviates some issues if segmaps are wildly different (can happen in mergers/clusters)
self.midmap,self.sn_mid_center,self.midseg_area = self.mid_segmap(self.galaxy_image,int(float(self.npix)/2.0),int(float(self.npix)/2.0))
if np.sum(self.midmap)==0.0:
print(' Empty MID segmap! ')
self.lotz_morph_status = 'Error: Empty MID Segmap'
return
else:
print(' Found MID segmap ')
#multiply by 1-valued MID segmap to get image for MID
self.mid_image = self.galaxy_image*self.midmap
#set negative values == 0
self.mid_image = np.where(self.mid_image > 0.0, self.mid_image, np.zeros_like(self.mid_image))
self.m_prime, self.m_stat_a1, self.m_stat_a2, self.m_stat_level = self.compute_m_statistic(self.mid_image)
if self.m_prime is not None:
print(' Found M statistic(1) : {:8.4f} {:8.4f} {:8.4f} {:8.4f} '.format(self.m_prime,self.m_stat_a1,self.m_stat_a2,self.m_stat_level))
else:
print(' Bad M statistic (check mid segmap) : ')
self.lotz_morph_status = 'Error: Bad M Statistic'
return
self.i_stat, self.i_stat_xpeak, self.i_stat_ypeak, self.i_stat_clump = self.compute_i_statistic(self.mid_image)
#print(self.i_stat, self.i_stat_xpeak, self.i_stat_ypeak, self.i_stat_clump.shape
if self.i_stat is not None:
print(' Found I statistic(1) : {:8.4f} {:8.4f} {:8.4f} '.format(self.i_stat, self.i_stat_xpeak, self.i_stat_ypeak))
else:
print(' Bad I statistic : ')
self.lotz_morph_status = 'Error: Bad I Statistic'
return
self.d_stat, self.d_stat_area, self.d_stat_xcen, self.d_stat_ycen = self.compute_d_statistic(self.mid_image,self.i_stat_xpeak,self.i_stat_ypeak)
if self.d_stat is not None:
print(' Found D statistic(1) : {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format(self.d_stat, self.d_stat_area, self.d_stat_xcen, self.d_stat_ycen))
else:
print(' Bad D statistic : ')
self.lotz_morph_status = 'Error: Bad D Statistic'
return
self.mid1_snpix = self.sn_per_pixel(self.galaxy_image,self.midmap)
self.lotz_morph_status = 'Computed MID (1)'
self.morph_hdu.header['MIDSEG']=('Freeman','MID_ cards use Freeman segmap algo')
self.morph_hdu.header['MID_AREA'] = (round(np.sum(self.midmap),1),'Area in pixels of MID segmap')
self.morph_hdu.header['MID_SNP']=(round(self.mid1_snpix,8),'MID average S/N per pixel')
self.morph_hdu.header['MID_MP']=(self.m_prime,'Mprime stat (Freeman et al. 2013)')
self.morph_hdu.header['MID_A1']=(round(self.m_stat_a1,4),'Area 1 for Mprime')
self.morph_hdu.header['MID_A2']=(round(self.m_stat_a2,4),'Area 2 for Mprime')
self.morph_hdu.header['MID_LEV']=(round(self.m_stat_level,8),'Level for Mprime')
self.morph_hdu.header['MID_I']=(self.i_stat,'I stat (Freeman et al. 2013)')
self.morph_hdu.header['MID_IXP']=(round(self.i_stat_xpeak,2),'xpeak for I')
self.morph_hdu.header['MID_IYP']=(round(self.i_stat_ypeak,2),'ypeak for I')
self.morph_hdu.header['MID_D']=(self.d_stat,'D stat (Freeman et al. 2013)')
self.morph_hdu.header['MID_DA']=(round(self.d_stat_area,4),'area for D stat')
self.morph_hdu.header['MID_DXC']=(round(self.d_stat_xcen,4),'centroid for D stat')
self.morph_hdu.header['MID_DYC']=(round(self.d_stat_ycen,4),'centroid for D stat')
#we can also do fun M20, Gini here
#use region from Freeman segmentation map algorithm
#gini
self.mid1_gini = self.compute_gini(self.galaxy_image,self.midmap*10.0)
if True:
print(' Found Gini(MID1) : {:8.4f} '.format(self.mid1_gini))
self.lotz_morph_status = 'Computed G(2)'
self.morph_hdu.header['MID_GINI']=(round(self.mid1_gini,8),'Gini in Freeman segmap')
#m20
self.mid1_m20 = self.compute_m20(self.galaxy_image*self.midmap)
if self.mid1_m20 is not None:
print(' Found M20(MID1) : {:8.4f} '.format(self.mid1_m20))
self.lotz_morph_status = 'Computed M20(2)'
self.morph_hdu.header['MID_M20']=(round(self.mid1_m20,8),'M20 in Freeman segmap')
else:
print(' Bad M20 (too faint or small?) : ', np.sum(self.galaxy_image*self.midmap))
self.lotz_morph_status = 'Computed M20(2)'
#try again with Lotz segmap for comparison
#interesting note: trying Freeman segmap algo with Lotz center instead of galaxy center yields very similar segmap to Lotz
self.mid2_image = 1.0*self.rpaseg_galaxy_image
self.mid2_image = np.where(self.mid2_image > 0.0,self.mid2_image,np.zeros_like(self.mid2_image))
self.mid2_m_prime, self.mid2_m_stat_a1, self.mid2_m_stat_a2, self.mid2_m_stat_level = self.compute_m_statistic(self.mid2_image)
if self.mid2_m_prime is not None:
print(' Found M statistic(2) : {:8.4f} {:8.4f} {:8.4f} {:8.4f} '.format(self.mid2_m_prime,self.mid2_m_stat_a1,self.mid2_m_stat_a2,self.mid2_m_stat_level))
else:
print(' Bad M statistic(2)(check segmap) : ')
self.lotz_morph_status = 'Error: Bad M Statistic'
return
self.mid2_i_stat, self.mid2_i_stat_xpeak, self.mid2_i_stat_ypeak, self.mid2_i_clump = self.compute_i_statistic(self.mid2_image)
if self.mid2_i_stat is not None:
print(' Found I statistic(2) : {:8.4f} {:8.4f} {:8.4f} '.format(self.mid2_i_stat, self.mid2_i_stat_xpeak, self.mid2_i_stat_ypeak))
else:
print(' Bad I statistic(2) : ')
self.lotz_morph_status = 'Error: Bad I Statistic'
return
self.mid2_d_stat, self.mid2_d_stat_area, self.mid2_d_stat_xcen, self.mid2_d_stat_ycen = self.compute_d_statistic(self.mid2_image,self.mid2_i_stat_xpeak,self.mid2_i_stat_ypeak)
if self.mid2_d_stat is not None:
print(' Found D statistic(2) : {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format(self.mid2_d_stat, self.mid2_d_stat_area, self.mid2_d_stat_xcen, self.mid2_d_stat_ycen))
else:
print(' Bad D statistic(2) : ')
self.lotz_morph_status = 'Error: Bad D Statistic'
return
self.lotz_morph_status = 'Computed MID (2)'
self.morph_hdu.header['MID2SEG']=('Lotz','MID2_ cards use Lotz segmap algo')
self.morph_hdu.header['MID2_MP']=(self.mid2_m_prime,'Mprime stat (Freeman et al. 2013)')
self.morph_hdu.header['MID2_A1']=(round(self.mid2_m_stat_a1,4),'Area 1 for Mprime')
self.morph_hdu.header['MID2_A2']=(round(self.mid2_m_stat_a2,4),'Area 2 for Mprime')
self.morph_hdu.header['MID2_LEV']=(round(self.mid2_m_stat_level,8),'Level for Mprime')
self.morph_hdu.header['MID2_I']=(self.mid2_i_stat,'I stat (Freeman et al. 2013)')
self.morph_hdu.header['MID2_IXP']=(round(self.mid2_i_stat_xpeak,2),'xpeak for I')
self.morph_hdu.header['MID2_IYP']=(round(self.mid2_i_stat_ypeak,2),'ypeak for I')
self.morph_hdu.header['MID2_D']=(self.mid2_d_stat,'D stat (Freeman et al. 2013)')
self.morph_hdu.header['MID2_DA']=(round(self.mid2_d_stat_area,4),'area for D stat')
self.morph_hdu.header['MID2_DXC']=(round(self.mid2_d_stat_xcen,4),'centroid for D stat')
self.morph_hdu.header['MID2_DYC']=(round(self.mid2_d_stat_ycen,4),'centroid for D stat')
else:
#raise error
self.lotz_morph_status = 'Error: Uninitialized'
print(' Exiting Morph calculation with status: '+self.lotz_morph_status)
return
#following 5 functions adapted from Freeman et al. (2013) and Peth et al. (2016)
def compute_d_statistic(self,img,xpeak,ypeak):
nx = img.shape[0]
ny = img.shape[1]
xcen = 0.0
ycen = 0.0
#first, find centroids
m00 = np.sum(img)
moment_array = (self.pixel_xpos)*img
xcen = np.sum(moment_array)/m00
moment_array = (self.pixel_ypos)*img
ycen= np.sum(moment_array)/m00
area = np.sum(np.where(img > 0.0,np.ones_like(img),np.zeros_like(img)))
d_stat = (((xpeak-xcen)**2 + (ypeak-ycen)**2)**0.5)/((area/math.pi)**0.5)
return d_stat, area, xcen, ycen
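# The D (deviation) statistic above is the peak-to-centroid offset normalized
# by the effective radius of the detected area:
#   D = sqrt((x_peak - x_cen)^2 + (y_peak - y_cen)^2) / sqrt(A / pi)
# where A is the number of positive-flux pixels (Freeman et al. 2013).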
def compute_i_statistic(self,img,scale=1.0):
if scale > 0.0:
new_img = scipy.ndimage.filters.gaussian_filter(img,scale,mode='nearest')
else:
new_img = 1.0*img
nx = new_img.shape[0]
ny = new_img.shape[1]
cimg = new_img*1.0
clump,xpeak,ypeak = self.i_clump(new_img)
w = np.where(xpeak != -9)[0]
if w.shape[0] == 0:
return None, None, None, clump
elif w.shape[0]==1:
return 0.0,xpeak[w][0]+0.5,ypeak[w][0]+0.5,clump
else:
#w.shape[0] > 1
int_clump = np.zeros_like(np.float32(w))
ucl = np.unique(clump)
for cv in ucl:
if cv<=0:
continue
int_clump[cv-1]=np.sum(np.where(clump==cv,img,np.zeros_like(img)))
#I think the above implementation is way faster?
#for jj in np.arange(nx):
# for kk in np.arange(ny):
# if clump[jj,kk] > 0:
# int_clump[clump[jj,kk]-1]=int_clump[clump[jj,kk]-1] + img[jj,kk]
mxi = np.argmax(int_clump)
mx = int_clump[mxi]
xpeak = xpeak[mxi]
ypeak = ypeak[mxi]
s = np.argsort(int_clump)
int_ratio = int_clump[s][-2]/int_clump[s][-1]
return int_ratio,xpeak+0.5,ypeak+0.5,clump #registering center on same pixel grid for D
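# The I (intensity) statistic above is the flux ratio of the second-brightest
# to the brightest clump, I = F_2 / F_1, measured on the smoothed image; a
# single clump yields I = 0 and no clumps yields None (Freeman et al. 2013).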
def i_clump(self,img):
nx = img.shape[0]
ny = img.shape[1]
clump = -1 + np.zeros_like(np.int32(img))
xpeak = -9 + np.zeros_like(np.linspace(0,1,100))
ypeak = -9 + np.zeros_like(np.linspace(0,1,100))
for jj in np.arange(nx):
for kk in np.arange(ny):
if img[jj,kk]==0.0:
continue
jjcl=jj*1
kkcl=kk*1
istop=0
while (istop==0):
jjmax=jjcl*1
kkmax=kkcl*1
imgmax=img[jjcl,kkcl]
for mm in [-1,0,1]:
if (jjcl+mm >= 0) and (jjcl+mm < nx):
for nn in [-1,0,1]:
if (kkcl+nn >= 0) and (kkcl+nn < ny):
if (img[jjcl+mm,kkcl+nn] > imgmax):
imgmax = img[jjcl+mm,kkcl+nn]
jjmax=jjcl+mm
kkmax=kkcl+nn
#end of mm, nn loops
if jjmax==jjcl and kkmax==kkcl:
ifound=0
mm=0
while (ifound==0) and (xpeak[mm] != -9) and (mm < 99):
if (xpeak[mm]==jjmax) and (ypeak[mm]==kkmax):
ifound=1
else:
mm = mm+1
#endwhile
if (ifound==0):
xpeak[mm]=jjmax
ypeak[mm]=kkmax
clump[jj,kk]=mm
istop=1
else:
jjcl = jjmax
kkcl = kkmax
#endwhile
#endfor
#endfor
clump = clump+1
return clump, xpeak, ypeak
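# i_clump assigns each nonzero pixel to a clump by steepest-ascent hill
# climbing: every pixel repeatedly steps to its brightest 8-connected
# neighbor until it reaches a local maximum, and pixels that converge on
# the same maximum share a clump label (up to 100 peaks are tracked).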
def compute_m_statistic(self,img,levels=None):
if levels is None:
levels = np.linspace(0.5,0.98,num=25)
nlevels = levels.shape[0]
norm_img = img/np.max(img)
area_ratio = np.zeros_like(levels)
a1 = np.zeros_like(levels)
a2 = np.zeros_like(levels)
max_level = 0
w = np.where(norm_img != 0.0)
if w[0].shape[0]==0:
return None,None,None,None
npix = w[0].shape[0]
snorm_img = norm_img[w]
si = np.argsort(snorm_img)
snorm_img = snorm_img[si] #sorted non-zero pixels
ai = np.argsort(norm_img)
for i,lev in enumerate(levels):
thr = int(round(npix*lev)-1)
w = np.where(norm_img >= snorm_img[thr])
if w[0].shape[0] > 0:
thr_arr = np.asarray([snorm_img[thr],1.0])
r,num,clump = region_grow.region_grow(norm_img,ai,THRESHOLD=thr_arr)
if r.shape[0] > 1:
u,counts = np.unique(clump,return_counts=True)
nzi = np.where(u != 0.0)[0]
if nzi.shape[0] > 1:
new_u = u[nzi]
new_counts = counts[nzi]
sci = np.argsort(new_counts)
a1[i] = float(new_counts[sci[-1]])#area of largest clump
a2[i] = float(new_counts[sci[-2]])#area of 2nd largest
area_ratio[i] = float(a2[i])/float(a1[i])
if np.max(area_ratio) > 0.0:
imax = np.argmax(area_ratio)
max_level = levels[imax]
m_prime = area_ratio[imax]
a1_val = a1[imax]
a2_val = a2[imax]
return m_prime, a1_val, a2_val, max_level
else:
return 0.0,0.0,0.0,0.0
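# The M (multimode) statistic above thresholds the normalized image at a
# grid of quantile levels, labels contiguous regions at each level, and
# reports M_prime = max over levels of (A_2 / A_1), the largest ratio of
# second-largest to largest region area (Freeman et al. 2013).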
def mid_segmap(self, img, xcen, ycen, e = 0.2, t = 100.0):
flat_img = img.flatten()
si = np.argsort(flat_img)
sort_img = flat_img[si]
npix = sort_img.shape[0]
minval = np.min(img)
maxval = np.max(img)
#level = np.logspace(np.log10(0.99),-5.0,num=198)
#expanded number of quantiles... was going down too low with only 200
level = np.linspace(0.999,0.0,num=2000)
nlevel = level.shape[0]
mid_segmap = np.zeros_like(img)
mu = 0.0
dmu = 0.0
dnw=0
sn_mid_center = img[xcen,ycen]/self.skysig
for i,lev in enumerate(level):
thr = np.asarray([sort_img[int(lev*npix)], np.max(img)])
r,num,clump = region_grow.region_grow(img,si,THRESHOLD=thr)
if clump[xcen,ycen]==0.0:
continue
w = np.where(clump==clump[xcen,ycen])
if mu > 0.0:
dnw = w[0].shape[0]-nw
if dnw < 16:
continue
dmu = (np.sum(img[w])-mu*nw)/float(dnw)
#nw = w[0].shape[0]
#print(i,lev,r.shape, xcen, ycen, clump[xcen,ycen], thr, self.skysig,nw, dnw)
if (dnw > 1.1*npix/2000.0) and (i > t-1):
nthr = np.asarray([sort_img[int(level[i-1]*npix)], np.max(img)])
r,num,clump = region_grow.region_grow(img,si,THRESHOLD=nthr)
w = np.where(clump==clump[xcen,ycen])
dnw = w[0].shape[0]-nw
if dnw < 16:
continue
dmu = (np.sum(img[w])-mu*nw)/float(dnw)
#nw = w[0].shape[0]
if (dmu/(mu+dmu) < e):
mid_segmap = np.where(clump==clump[xcen,ycen],np.ones_like(clump),np.zeros_like(clump))
#regularize map
#mid_segmap = scipy.ndimage.filters.uniform_filter(mid_segmap,size=5)
mid_segmap = scipy.ndimage.filters.gaussian_filter(mid_segmap,self.psf_fwhm_pixels/2.355)
mid_segmap = np.where(mid_segmap > 1.0e-1,np.ones_like(mid_segmap),np.zeros_like(mid_segmap))
#print('{:8d} {:5.4f} {:10.4f} {:10.4f} {:8d}'.format(i, lev, mu, dmu, dnw))
return mid_segmap,sn_mid_center,np.sum(mid_segmap)
mu = np.mean(img[w])
nw = w[0].shape[0]
#print('{:8d} {:5.4f} {:10.4f} {:10.4f} {:8d}'.format(i, lev, mu, dmu, dnw))
return mid_segmap,sn_mid_center,np.sum(mid_segmap)
#estimate mean S/N ratio of galaxy pixels, assuming long integration times (sky dominated)
def sn_per_pixel(self,image,segmap):
im = image.flatten()
seg = segmap.flatten()
ap = np.where(seg > 0.0)[0]
n = np.sum(np.ones_like(ap))
s2n = np.sum( im[ap]/((self.skysig**2)**0.5))/n
return s2n
#calculate M_20 as in Lotz et al. 2004
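# M20 is the normalized second-order moment of the brightest pixels holding
# 20% of the total flux:
#   M_20 = log10( sum_i f_i * r_i^2 / M_tot )
# summed over the brightest pixels until sum_i f_i > 0.2 * f_tot, with
# M_tot = mu_20 + mu_02 taken about the moment center (mxc, myc).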
def compute_m20(self,analyze_image):
m20 = 0.0
mu,eta = central_moments(analyze_image,[2,0],[0,2],xc=self.mxc,yc=self.myc)
mtot = mu[0] + mu[1]
x_array = (self.pixel_xpos).flatten()-self.mxc
y_array = (self.pixel_ypos).flatten()-self.myc
r2_array = x_array**2 + y_array**2
im_array = analyze_image.flatten()
si = np.flipud(np.argsort(im_array))
fsum = 0.0
mom20 = 0.0
totsum = np.sum(im_array)
for i in si:
fsum = fsum + im_array[i]
mom20 = mom20 + im_array[i]*r2_array[i]
if fsum/totsum > 0.20:
break
if mom20 > 0.0 and mtot > 0.0:
m20param = np.log10(mom20/mtot)
return m20param
else:
return None
#calculate Gini as in Lotz et al. 2004
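# The Gini coefficient over the absolute pixel fluxes inside the segmap:
#   G = 1 / (|f|_tot * (n - 1)) * sum_{i=1..n} (2i - n - 1) * |f_i|
# with the n pixel values sorted in increasing order; G = 0 for perfectly
# uniform light and G approaches 1 when one pixel holds all the flux.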
def compute_gini(self,analyze_image,segmap):
analyze_image = (analyze_image).flatten()
map_pixels = (segmap).flatten()
pixelvals = np.abs( (analyze_image)[np.where(map_pixels == 10.0)[0]] )
sorted_pixelvals = np.sort(pixelvals)
total_absflux = np.sum(sorted_pixelvals)
gini = 0.0
n = float(sorted_pixelvals.shape[0])
#use 1-based rank i = 1..n, as in the Lotz et al. (2004) definition
for i,x in enumerate(sorted_pixelvals, start=1):
gini = gini + (2.0*float(i)-n-1.0)*x
gini = gini/total_absflux/(n-1.0)
return gini
#calculate circular concentration following Conselice 2003
#Note: experiments reveal a difference between this algorithm
# and results using Lotz et al. (2004) IDL code,
# This code gives ~0.2 lower median C values with a
# random difference of ~0.2 compared with IDL version
# Reason: IDL implementation unconverged in center of curve-of-growth
# Requires smaller/more accurate sub-pixel steps
# Python version uses effectively infinite pixel resolution
# and is shown to be stable wrt number of points in COG
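# Concentration and its statistical error, as computed below:
#   C = 5 * log10(r80 / r20)
#   sigma_C^2 = (dC/dr80)^2 * sigma_r80^2 + (dC/dr20)^2 * sigma_r20^2
# with dC/dr = +/- 5 / (r * ln 10), propagated from the curve-of-growth
# errors on r20 and r80.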
def concentration(self,xcenter,ycenter,extent):
xi = np.float32(np.arange(self.npix+1))
xpos,ypos = np.meshgrid(xi,xi)
xmin,xmax = np.min(xpos-xcenter),np.max(xpos-xcenter)
ymin,ymax = np.min(ypos-ycenter),np.max(ypos-ycenter)
analyze_image = self.galaxy_image
frac_overlap_extent = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,extent,1,3)
total_area = np.sum(frac_overlap_extent)
total_flux = np.sum(analyze_image*frac_overlap_extent)
if total_flux <= 0.0:
return None,None,None,None,None,None,'Error: Probably too faint/weird to measure C'
err_on_total = ((total_area)*self.skysig**2)**0.5
numpts=200
minr = 0.2
maxr = extent
radius_grid = np.logspace(np.log10(minr),np.log10(maxr),num=numpts)
#radius_grid = np.linspace(minr,maxr,num=numpts)
r20 = 0.0
r20_err = -1.0
r80 = 0.0
r80_err = -1.0
this_flux = np.zeros_like(radius_grid)
ffrac = np.zeros_like(radius_grid)
ffrac_err = np.zeros_like(radius_grid)
for i,r in enumerate(radius_grid):
frac_overlap_r = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r,1,3)
area = np.sum(frac_overlap_r)
#expected_negative_flux = -1.0*self.skysig*(area/2.0)*(math.pi/4.0)
this_flux[i] = np.sum(analyze_image*frac_overlap_r)
ffrac[i] = this_flux[i]/total_flux
err_on_flux = ((area)*self.skysig**2)**0.5 #error on average flux
if (this_flux[i] > 0.0):
ffrac_err[i] = ( (err_on_flux/this_flux[i])**2 + (err_on_total/total_flux)**2 )**0.5
else:
#avg_flux_in_r is negative
ffrac_err[i] = -1.0
#evaluate curve of growth for 0.2, 0.8
r20,r20_err,r20_status = self.evaluate_cog(0.2,radius_grid,this_flux,ffrac,ffrac_err,total_flux)
r80,r80_err,r80_status = self.evaluate_cog(0.8,radius_grid,this_flux,ffrac,ffrac_err,total_flux)
if (r20_status == 'Positive R_val') and (r80_status == 'Positive R_val'):
#compute concentration, return all
assert r20 is not None
assert r80 is not None
assert r20_err > 0.0
assert r80_err > 0.0
if r20 < r80:
#this is a correct result
c = 5.0*np.log10(r80/r20)
dcdr80 = 5.0*(1.0/r80)*(1.0/np.log(10.0))
dcdr20 = -5.0*(1.0/r20)*(1.0/np.log(10.0))
c_err = ( (dcdr80**2)*(r80_err**2) + (dcdr20**2)*(r20_err**2) )**0.5
return c, c_err, r20, r20_err, r80, r80_err, 'Calculated C'
else:
#this is weird
return None,None,None,None,None,None,'Error:Weird C calculation'
else:
#error, return Nones and status
return None,None,None,None,None,None,'Error: r20 or r80 poorly defined'
#helper function for concentration and other curve of growth estimates
def evaluate_cog(self,value, r_array, flux_r, frac_r, fracerr_r, totalflux):
this_r_pixels = None
r_pixels_err = None
if np.min(frac_r) > value:
status = 'Galaxy nucleus dominates'
elif np.max(frac_r) < value:
#I think this will most often happen when all are zero
status = 'Galaxy too small or faint'
else:
#max is > 0.2 and min is < 0.2 --> it crosses at least once
#find first crossing and then interpolate
#demand that first crossing occurs after r=0.2 pix otherwise we're probably just seeing noise
eta_ind = np.where(np.logical_and(frac_r >= value,r_array > 0.2))[0]
if eta_ind.shape[0] > 0:
for ei in eta_ind:
if ei==0:
continue
elif frac_r[ei-1] < value:
#interpolate
this_r_pixels = np.interp(value,frac_r[ei-1:ei+1],r_array[ei-1:ei+1])
delta_eta = np.abs(frac_r[ei-1] - frac_r[ei])
delta_r = np.abs(r_array[ei-1] - r_array[ei])
fsigma_eta = np.max(fracerr_r[ei-1:ei+1])
sigma_eta = value*fsigma_eta
r_pixels_err = (((delta_r/delta_eta)**2)*(sigma_eta**2))**0.5 #conservative estimate of error on r
break
else:
continue
if this_r_pixels is None or this_r_pixels<=0.0:
status = 'Weird light profile or too faint'
else:
status = 'Positive R_val'
else:
#actually it never crosses: error
status = 'Error: problem with R_val curve of growth'
return this_r_pixels, r_pixels_err, status
def fluxrad_circ(self,fluxfrac,xcenter,ycenter,extent):
xi = np.float32(np.arange(self.npix+1))
xpos,ypos = np.meshgrid(xi,xi)
xmin,xmax = np.min(xpos-xcenter),np.max(xpos-xcenter)
ymin,ymax = np.min(ypos-ycenter),np.max(ypos-ycenter)
analyze_image = self.galaxy_image
frac_overlap_extent = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,extent,1,3)
total_area = np.sum(frac_overlap_extent)
total_flux = np.sum(analyze_image*frac_overlap_extent)
if total_flux <= 0.0:
return None,None,'Error: Probably too faint/weird to measure flux radius'
err_on_total = ((total_area)*self.skysig**2)**0.5
numpts=200
minr = 0.2
maxr = extent
radius_grid = np.logspace(np.log10(minr),np.log10(maxr),num=numpts)
#radius_grid = np.linspace(minr,maxr,num=numpts)
this_flux = np.zeros_like(radius_grid)
ffrac = np.zeros_like(radius_grid)
ffrac_err = np.zeros_like(radius_grid)
for i,r in enumerate(radius_grid):
frac_overlap_r = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r,1,3)
area = np.sum(frac_overlap_r)
this_flux[i] = np.sum(analyze_image*frac_overlap_r)
ffrac[i] = this_flux[i]/total_flux
err_on_flux = ((area)*self.skysig**2)**0.5 #error on average flux
if (this_flux[i] > 0.0):
ffrac_err[i] = ( (err_on_flux/this_flux[i])**2 + (err_on_total/total_flux)**2 )**0.5
else:
#avg_flux_in_r is negative
ffrac_err[i] = -1.0
rf,rf_err,rf_status = self.evaluate_cog(fluxfrac,radius_grid,this_flux,ffrac,ffrac_err,total_flux)
return rf,rf_err,rf_status
def fluxrad_ellip(self,fluxfrac,xcenter,ycenter,extent):
xi = np.float32(np.arange(self.npix+1))
xpos,ypos = np.meshgrid(xi,xi)
xmin,xmax = np.min(xpos-xcenter),np.max(xpos-xcenter)
ymin,ymax = np.min(ypos-ycenter),np.max(ypos-ycenter)
analyze_image = self.galaxy_image
frac_overlap_extent = photutils.geometry.elliptical_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,extent,extent/self.elongation,self.pa_radians,1,3)
total_area = np.sum(frac_overlap_extent)
total_flux = np.sum(analyze_image*frac_overlap_extent)
if total_flux <= 0.0:
return None,None,'Error: Probably too faint/weird to measure flux radius'
err_on_total = ((total_area)*self.skysig**2)**0.5
numpts=200
minr = 0.2
maxr = extent
radius_grid = np.logspace(np.log10(minr),np.log10(maxr),num=numpts)
#radius_grid = np.linspace(minr,maxr,num=numpts)
this_flux = np.zeros_like(radius_grid)
ffrac = np.zeros_like(radius_grid)
ffrac_err = np.zeros_like(radius_grid)
for i,r in enumerate(radius_grid):
frac_overlap_r = photutils.geometry.elliptical_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r,r/self.elongation,self.pa_radians,1,3)
area = np.sum(frac_overlap_r)
this_flux[i] = np.sum(analyze_image*frac_overlap_r)
ffrac[i] = this_flux[i]/total_flux
err_on_flux = ((area)*self.skysig**2)**0.5 #error on average flux
if (this_flux[i] > 0.0):
ffrac_err[i] = ( (err_on_flux/this_flux[i])**2 + (err_on_total/total_flux)**2 )**0.5
else:
#avg_flux_in_r is negative
ffrac_err[i] = -1.0
rf,rf_err,rf_status = self.evaluate_cog(fluxfrac,radius_grid,this_flux,ffrac,ffrac_err,total_flux)
return rf,rf_err,rf_status
#code's internal segmentation map algorithm, following Lotz et al. (2004)
#this is probably the most important uncertainty in the translation from the IDL code
def petro_sma_segmap(self,xcenter,ycenter,r_ellip):
#first, convolve by Gaussian with FWHM~ 1/5 (1/10?) petrosian radius?
fwhm_pixels = r_ellip/10.0
galaxy_psf_pixels = self.psf_fwhm_arcsec/self.pixelscale_arcsec
#minimum smoothing length ~3x image PSF
if fwhm_pixels < 3.0*galaxy_psf_pixels:
fwhm_pixels = 3.0*galaxy_psf_pixels
sigma_pixels = fwhm_pixels/2.355
#use masked self.galaxy_image version
smoothed_image = scipy.ndimage.filters.gaussian_filter(self.galaxy_image,sigma_pixels,mode='nearest')
self.rpa_sigma_pixels = sigma_pixels
#compute surface brightness at petrosian radius
xi = np.float32(np.arange(self.npix+1))
xpos,ypos = np.meshgrid(xi,xi)
xmin,xmax = np.min(xpos-xcenter),np.max(xpos-xcenter)
ymin,ymax = np.min(ypos-ycenter),np.max(ypos-ycenter)
frac_overlap_rminus = photutils.geometry.elliptical_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r_ellip-1.0,(r_ellip-1.0)/self.elongation,self.pa_radians,1,3)
frac_overlap_rplus = photutils.geometry.elliptical_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r_ellip+1.0,(r_ellip+1.0)/self.elongation,self.pa_radians,1,3)
frac_overlap_annulus = frac_overlap_rplus - frac_overlap_rminus
area_in_annulus = np.sum(frac_overlap_annulus)
avg_flux_in_annulus = 0.0
err_in_annulus = 0.0
if area_in_annulus > 0.0:
avg_flux_in_annulus = np.sum(smoothed_image*frac_overlap_annulus)/area_in_annulus
err_in_annulus = ((1.0/area_in_annulus)*self.skysig**2)**0.5
#do we also want a S/N test here?
if avg_flux_in_annulus > 0.0:
#set pixels with flux >= mu equal to 10, < mu equal to 0.0
#initial calculation
self.galaxy_smoothed_segmap = np.where(smoothed_image >= avg_flux_in_annulus,10.0*np.ones_like(smoothed_image),np.zeros_like(smoothed_image))
#median filter to remove outlying pixels, useful mainly if rejecting CRs or bad pixels
if self.filter_segmap:
self.medfilt_segmap = scipy.ndimage.filters.uniform_filter(self.galaxy_smoothed_segmap,size=10,mode='constant',cval=0.0)-self.galaxy_smoothed_segmap/(100.0)
self.stdfilt = scipy.ndimage.filters.generic_filter(self.galaxy_smoothed_segmap,std_value,size=10,mode='constant',cval=0.0)
self.stdfilt = np.where(self.stdfilt > 0.0,self.stdfilt,np.zeros_like(self.stdfilt))
self.filtered_segmap = np.where( np.abs(self.galaxy_smoothed_segmap - self.medfilt_segmap) > 3.0*self.stdfilt, self.medfilt_segmap, self.galaxy_smoothed_segmap)
else:
self.filtered_segmap = 1.0*self.galaxy_smoothed_segmap
#issues with non-contiguous segmap get flagged later, or will have low S/N
#finally, anything that survives with a nonzero value is part of the galaxy
self.filtered_segmap = np.where(self.filtered_segmap > 0.001,10.0*np.ones_like(smoothed_image),np.zeros_like(smoothed_image))
return self.filtered_segmap#galaxy_segmap #
else:
#uhh, problem
return None
def compute_asym(self,xcenter=None,ycenter=None,extent=None,a_bkg=None):
assert xcenter is not None
assert ycenter is not None
assert extent is not None
assert a_bkg is not None
x0 = np.asarray([xcenter,ycenter])
OptimizeResult = scipy.optimize.minimize(self._asymmetry_wrapper, x0, args=(extent,a_bkg), method='Powell',options={'disp': False, 'return_all': False, 'maxiter': 400, 'maxfev': None, 'xtol': 0.1, 'ftol': 0.01})
if OptimizeResult.success:
asym = OptimizeResult.fun
xcen_a = OptimizeResult.x[0]
ycen_a = OptimizeResult.x[1]
else:
asym=99.0
xcen_a = xcenter
ycen_a = ycenter
message = OptimizeResult.message
return float(asym), xcen_a, ycen_a, message
def _asymmetry_wrapper(self,coords,radius=1.0,a_bkg=99.0):
asym, ga, ba = self.galaxy_asymmetry(coords,radius=radius,a_bkg=a_bkg)
return asym
def galaxy_asymmetry(self,coords,radius=1.0,a_bkg=99.0):
xc = coords[0]
yc = coords[1]
#don't let the asymmetry center wander off toward the edge of the image
if xc<=radius or yc<=radius or xc>=self.npix-radius or yc>=self.npix-radius:
asym=99.0
a_gal = 99.0
noise_offset = 99.0
else:
#following Lotz code from December 2013
#must confirm
analyze_image = self.galaxy_image
rot_gal_im = skimage.transform.rotate(analyze_image,180.0,center=(xc,yc),mode='constant',cval=0.0,preserve_range=True)
gal_dif = self.galaxy_image - rot_gal_im
xmin = 0.0-xc
xmax = self.npix+1 - xc
ymin = 0.0-yc
ymax = self.npix+1 - yc
frac_overlap_r = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,radius,1,3)
norm = np.sum(frac_overlap_r) #area of galaxy
gal_im_ap = analyze_image*frac_overlap_r
gal_dif_ap = gal_dif*frac_overlap_r
total_gal_ap = np.sum(np.abs(gal_im_ap))
a_gal = np.sum(np.abs(gal_dif_ap))/total_gal_ap
#effectively divides by the average intensity per pixel
#a_bkg is already an area-normalized quantity (normed to background box area)
noise_offset = norm*a_bkg/total_gal_ap
asym = a_gal - noise_offset
return asym, a_gal, noise_offset
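# The asymmetry above follows the Conselice/Lotz definition:
#   A_gal = sum(|I - I_180|) / sum(|I|)  within the circular aperture,
#   A = A_gal - N_ap * A_bkg / sum(|I|)
# where I_180 is the image rotated 180 degrees about the trial center,
# A_bkg is the per-pixel background asymmetry from the sky box, and N_ap
# is the aperture area in pixels.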
def rpetro_circ(self,xcenter=None,ycenter=None):
numpts=100#self.npix
#radius_grid = np.linspace(0.01,self.npix,num=numpts)
minr = 1.5
maxr = float(self.npix)/2.0
radius_grid = np.logspace(np.log10(minr),np.log10(maxr),num=numpts)
xi = np.float32(np.arange(self.npix+1))
xpos,ypos = np.meshgrid(xi,xi)
xmin,xmax = np.min(xpos-xcenter),np.max(xpos-xcenter)
ymin,ymax = np.min(ypos-ycenter),np.max(ypos-ycenter)
petro_ratio = np.zeros_like(radius_grid)
petro_r_pixels = 0.0
petro_r_pixels_err = -1.0
petro_ratio_ferr = np.zeros_like(radius_grid)
analyze_image = self.galaxy_image
for i,r in enumerate(radius_grid):
frac_overlap_r = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r,1,3)
frac_overlap_08r = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r-1.0,1,3)
frac_overlap_125r = photutils.geometry.circular_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r+1.0,1,3)
area_in_r = np.sum(frac_overlap_r)
avg_flux_in_r = np.sum(analyze_image*frac_overlap_r)/area_in_r
err_in_r = ((1.0/area_in_r)*self.skysig**2)**0.5 #error on average flux
frac_overlap_annulus = frac_overlap_125r - frac_overlap_08r
area_in_annulus = np.sum(frac_overlap_annulus)
avg_flux_in_annulus = 0.0
err_in_annulus = 0.0
if area_in_annulus > 0.0:
avg_flux_in_annulus = np.sum(analyze_image*frac_overlap_annulus)/area_in_annulus
err_in_annulus = ((1.0/area_in_annulus)*self.skysig**2)**0.5
if (avg_flux_in_r > 0.0) and (avg_flux_in_annulus > 0.0):
petro_ratio[i] = avg_flux_in_annulus/avg_flux_in_r
petro_ratio_ferr[i] = ( (err_in_r/avg_flux_in_r)**2 + (err_in_annulus/avg_flux_in_annulus)**2 )**0.5
elif (avg_flux_in_r > 0.0) and (avg_flux_in_annulus <= 0.0):
petro_ratio[i] = 0.0
petro_ratio_ferr[i] = -1.0
else:
#avg_flux_in_r is negative
petro_ratio[i] = 0.0
petro_ratio_ferr[i] = -1.0
#if zeros for awhile, break
if i > 25 and np.sum(petro_ratio[i-10:i+1])==0.0:
break
#evaluate RMS of r and annuli fluxes to estimate convergence? measure sigma_petro_ratio?
#evaluate petrosian ratio curve
if np.min(petro_ratio) > self.rpetro_eta:
status = 'Galaxy too large'
elif np.max(petro_ratio) < self.rpetro_eta:
#I think this will most often happen when all are zero
status = 'Galaxy too small or faint'
else:
#max is > 0.2 and min is < 0.2 --> it crosses at least once
#find first crossing and then interpolate
#demand that first crossing occurs after r=1.5 pix otherwise we're probably just seeing noise
eta_ind = np.where(np.logical_and(petro_ratio <= self.rpetro_eta,radius_grid > 1.5))[0]
if eta_ind.shape[0] > 0:
for ei in eta_ind:
if ei==0:
continue
elif petro_ratio[ei-1] > self.rpetro_eta:
#interpolate
petro_r_pixels = np.interp(self.rpetro_eta,np.flipud(petro_ratio[ei-1:ei+1]),np.flipud(radius_grid[ei-1:ei+1]))
delta_eta = np.abs(petro_ratio[ei-1] - petro_ratio[ei])
delta_r = np.abs(radius_grid[ei-1] - radius_grid[ei])
fsigma_eta = np.max(petro_ratio_ferr[ei-1:ei+1])
sigma_eta = self.rpetro_eta*fsigma_eta
petro_r_pixels_err = (((delta_r/delta_eta)**2)*(sigma_eta**2))**0.5 #conservative estimate of error on rpetro estimate
break
else:
continue
if petro_r_pixels<=0.0:
status = 'Weird light profile or too faint'
else:
status = 'Positive R_pet'
else:
#actually it never crosses: error
status = 'Error: problem with R_pet curve of growth'
return petro_r_pixels, status, petro_r_pixels_err
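# Both Petrosian radius routines solve for the radius where the Petrosian
# ratio eta(r) = <I(r)> / <I(<r)>, the mean surface brightness in a thin
# annulus at r over the mean within r, first drops to rpetro_eta = 0.2,
# linearly interpolating across the crossing and propagating an error from
# the flux uncertainties in the annulus and aperture.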
def rpetro_ellip(self,xcenter=None,ycenter=None):
numpts=100#self.npix
#radius_grid = np.linspace(0.01,self.npix,num=numpts)
minr = 1.5
maxr = float(self.npix)/2.0
radius_grid = np.logspace(np.log10(minr),np.log10(maxr),num=numpts)
xi = np.float32(np.arange(self.npix+1))
xpos,ypos = np.meshgrid(xi,xi)
xmin,xmax = np.min(xpos-xcenter),np.max(xpos-xcenter)
ymin,ymax = np.min(ypos-ycenter),np.max(ypos-ycenter)
petro_ratio = np.zeros_like(radius_grid)
petro_r_pixels = 0.0
petro_r_pixels_err = -1.0
petro_ratio_ferr = np.zeros_like(radius_grid)
analyze_image = self.galaxy_image
for i,r in enumerate(radius_grid):
#r = semimajor axis
ry = r/self.elongation #semiminor axis
frac_overlap_r = photutils.geometry.elliptical_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r,ry,self.pa_radians,1,3)
frac_overlap_08r = photutils.geometry.elliptical_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r-1.0,(r-1.0)/self.elongation,self.pa_radians,1,3)
frac_overlap_125r = photutils.geometry.elliptical_overlap_grid(xmin,xmax,ymin,ymax,self.npix,self.npix,r+1.0,(r+1.0)/self.elongation,self.pa_radians,1,3)
area_in_r = np.sum(frac_overlap_r)
avg_flux_in_r = np.sum(analyze_image*frac_overlap_r)/area_in_r
err_in_r = ((1.0/area_in_r)*self.skysig**2)**0.5 #error on average flux
frac_overlap_annulus = frac_overlap_125r - frac_overlap_08r
area_in_annulus = np.sum(frac_overlap_annulus)
avg_flux_in_annulus = 0.0
err_in_annulus = 0.0
if area_in_annulus > 0.0:
avg_flux_in_annulus = np.sum(analyze_image*frac_overlap_annulus)/area_in_annulus
err_in_annulus = ((1.0/area_in_annulus)*self.skysig**2)**0.5
if (avg_flux_in_r > 0.0) and (avg_flux_in_annulus > 0.0):
petro_ratio[i] = avg_flux_in_annulus/avg_flux_in_r
petro_ratio_ferr[i] = ( (err_in_r/avg_flux_in_r)**2 + (err_in_annulus/avg_flux_in_annulus)**2 )**0.5
elif (avg_flux_in_r > 0.0) and (avg_flux_in_annulus <= 0.0):
petro_ratio[i] = 0.0
petro_ratio_ferr[i] = -1.0
else:
#avg_flux_in_r is negative
petro_ratio[i] = 0.0
petro_ratio_ferr[i] = -1.0
#if zeros for awhile, break
if i > 25 and np.sum(petro_ratio[i-10:i+1])==0.0:
break
#evaluate RMS of r and annuli fluxes to estimate convergence? measure sigma_petro_ratio?
#evaluate petrosian ratio curve
if np.min(petro_ratio) > self.rpetro_eta:
status = 'Galaxy too large or unexpectedly elongated'
elif np.max(petro_ratio) < self.rpetro_eta:
#I think this will most often happen when all are zero
status = 'Galaxy too small or faint'
else:
#max is > 0.2 and min is < 0.2 --> it crosses at least once
#find first crossing and then interpolate
#demand that first crossing occurs after r=1.5 pix otherwise we're probably just seeing noise
eta_ind = np.where(np.logical_and(petro_ratio <= self.rpetro_eta,radius_grid > 1.5))[0]
if eta_ind.shape[0] > 0:
for ei in eta_ind:
if ei==0:
continue
elif petro_ratio[ei-1] > self.rpetro_eta:
#interpolate
petro_r_pixels = np.interp(self.rpetro_eta,np.flipud(petro_ratio[ei-1:ei+1]),np.flipud(radius_grid[ei-1:ei+1]))
delta_eta = np.abs(petro_ratio[ei-1] - petro_ratio[ei])
delta_r = np.abs(radius_grid[ei-1] - radius_grid[ei])
fsigma_eta = np.max(petro_ratio_ferr[ei-1:ei+1])
sigma_eta = self.rpetro_eta*fsigma_eta
petro_r_pixels_err = (((delta_r/delta_eta)**2)*(sigma_eta**2))**0.5 #conservative estimate of error on rpetro estimate
break
else:
continue
if petro_r_pixels<=0.0:
status = 'Weird light profile or too faint'
else:
status = 'Positive R_pet'
else:
#actually it never crosses: error
status = 'Error: problem with R_pet curve of growth'
return petro_r_pixels, status, petro_r_pixels_err
def init_from_synthetic_image(self,data_hdu,segmap_hdu,photutils_hdu,cm_hdu):
self.morphtype='Synthetic Image'
#inputs required by IDL code:
# morph_input_obj.write('# IMAGE NPIX PSF SCALE SKY XC YC A/B PA SKYBOX MAG MAGER DM RPROJ[arcsec] ZEROPT[mag?] \n')
#image FITS filename
self.imagefile=data_hdu.header['THISFILE']
self.image = data_hdu.data
self.segmap = segmap_hdu.data #general segmap containing multiple objects/labels
self.clabel = segmap_hdu.header['CLABEL'] #label corresponding to targeted object
#setting for doing sigma clip on internal segmap. Not very efficient in SciPy versus IDL (why?)
#avoid if simulated images -- not necessary if we don't expect awful pixels
self.filter_segmap = False
#final input for lotzmorph
self.galaxy_segmap = np.where(self.segmap==self.clabel,self.segmap,np.zeros_like(self.segmap))
#final image masks other objects
self.galaxy_image = np.where(np.logical_or(self.segmap==self.clabel,self.segmap==0),self.image,np.zeros_like(self.image))
#number of pixels in image
self.npix = data_hdu.header['NPIX']
xi = np.float32(np.arange(self.npix))+0.50 #center locations of pixels
self.pixel_xpos,self.pixel_ypos = np.meshgrid(xi,xi)
#psf in arcsec
self.psf_fwhm_arcsec = data_hdu.header['APROXPSF']
#scale = pixel size in arcsec
self.pixelscale_arcsec = data_hdu.header['PIXSCALE']
self.psf_fwhm_pixels = self.psf_fwhm_arcsec/self.pixelscale_arcsec
#physical scale in kpc, for funsies
self.kpc_per_arcsec = data_hdu.header['PSCALE']
#sky = background level in image
self.sky = data_hdu.header['SKY']
#x and y positions. MUST CONFIRM PYTHON ORDERING/locations, 0,1 as x,y seem ok for now
self.xcentroid = segmap_hdu.header['POS0']
self.ycentroid = segmap_hdu.header['POS1']
self.thisband_xcentroid = photutils_hdu.header['XCENTR']
self.thisband_ycentroid = photutils_hdu.header['YCENTR']
#a/b I'm guessing this is the elongation parameter?
self.elongation = photutils_hdu.header['ELONG']
assert (self.elongation > 0.0)
#PA position angle. WHAT UNITS?
self.pa_radians = photutils_hdu.header['ORIENT'] #this looks like it's in radians, counterclockwise (photutils)
#skybox. do we need this if we know skysig?
self.skysig = data_hdu.header['SKYSIG']
#create arbitrary perfect noise image matching synthetic image properties
#this is okay if noise is perfectly uniform gaussian right?
self.skybox = self.skysig*np.random.randn(50,50)
bc1 = float(self.skybox.shape[0]-1)/2.0
bc2 = float(self.skybox.shape[1]-1)/2.0
self.rot_skybox = skimage.transform.rotate(self.skybox,180.0,center=(bc1,bc2),mode='constant',cval=0.0,preserve_range=True)
#AB magnitude best ... "observed" ? aperture mags? segment mags?
self.magtot_intrinsic = data_hdu.header['MAG']
self.magtot_observed = data_hdu.header['NEWMAG'] #-1 = bad
self.magseg = photutils_hdu.header['SEGMAG'] #-1 = bad
self.magseg_err = photutils_hdu.header['SEGMAGE'] #-1 = bad
#distance modulus
self.dm = data_hdu.header['DISTMOD']
#redshift, because why not
self.redshift = data_hdu.header['REDSHIFT']
#rproj (arcsec)
self.rproj_pix = photutils_hdu.header['EQ_RAD'] #pixels
self.rproj_arcsec = self.rproj_pix*self.pixelscale_arcsec
#AB magnitude zeropoint
self.abzp = data_hdu.header['ABZP']
#photutils central moments
self.photutils_central_moments=cm_hdu.data
#compute scale invariant moments
scale_invariant_moments = self.compute_scale_invariant_moments()
#compute hu translation,scale,rotation invariant moments
hu_moments = self.compute_hu_moments()
#Flusser & Suk 1993 Affine-invariant moments
#nice because they are the lowest-order (less noisy) & sensitive to symmetric objects
#note they are NOT blur invariant, but I think this is desirable (assert that PSF << features)
fs93_moments = self.compute_fs93_moments()
#Possible new Asymmetry indicators?:
#1/4 of summed magnitude of 4 3rd-order image moments
#Should be manifestly zero for symmetric objects
#rotation, translation, and scale invariant
moment_asymmetry = 0.0
magn_asym_sum = np.abs(scale_invariant_moments[3,0]) + np.abs(scale_invariant_moments[0,3]) + np.abs(scale_invariant_moments[2,1]) + np.abs(scale_invariant_moments[1,2])
if magn_asym_sum > 0.0:
self.m_a = np.log10(magn_asym_sum)
else:
self.m_a = 0.0
#log mag of fs93 I2 affine-invariant moment (3rd-order only)
#mag of I2
mag_I2 = np.abs(fs93_moments[1])
if mag_I2 > 0.0:
self.m_I2 = np.log10(mag_I2)
else:
self.m_I2 = 0.0
dummy_array = np.asarray([0.0])
if self.morphtype != 'Synthetic Image':
self.morph_hdu = pyfits.ImageHDU(dummy_array)
self.morph_hdu.header['Image']=('dummy', 'What data does this image contain?')
else:
self.morph_hdu = pyfits.ImageHDU(self.scale_invariant_moments)
self.morph_hdu.header['Image']=('Scale Invariant Moments', 'What data does this image contain?')
self.morph_hdu.header['DESC']=(self.description)
self.morph_hdu.header['TYPE']=(self.morphtype,'What kind of image was analyzed?')
self.morph_hdu.header['Date']=(datetime.datetime.now().date().isoformat())
return self
##work-in-progress
def init_from_panstarrs_image(self,data_hdu,weight_hdu,segmap_hdu,se_catalog):
self.morphtype='PanSTARRS Image'
#se_catalog is just a single-entry ascii table after deciphering SE calc
#inputs required by IDL code:
# morph_input_obj.write('# IMAGE NPIX PSF SCALE SKY XC YC A/B PA SKYBOX MAG MAGER DM RPROJ[arcsec] ZEROPT[mag?] \n')
xmin = se_catalog['XMIN_IMAGE']
xmax = se_catalog['XMAX_IMAGE']
ymin = se_catalog['YMIN_IMAGE']
ymax = se_catalog['YMAX_IMAGE']
#assume object is at center with significant buffer
xspan = xmax-xmin
yspan = ymax-ymin
span = np.max(np.asarray([xspan,yspan]))+80
##TEMPORARY Assume image roughly centered
new_xmin = int(se_catalog['X_IMAGE'] - span/2)
new_xmax = int(se_catalog['X_IMAGE'] + span/2)
new_ymin = int(se_catalog['Y_IMAGE'] - span/2)
new_ymax = int(se_catalog['Y_IMAGE'] + span/2)
#image FITS filename
self.imagefile= data_hdu.fileinfo()['file'].name #data_hdu.header['THISFILE']
#SE = backwards indices than Python ?
#confirmed with imshow/segmaps
self.image = data_hdu.data[new_ymin:new_ymax,new_xmin:new_xmax]
self.segmap = segmap_hdu.data[new_ymin:new_ymax,new_xmin:new_xmax] #general segmap containing multiple objects/labels
self.clabel = se_catalog['NUMBER'] #label corresponding to targeted object
#setting for doing sigma clip on internal segmap. Not very efficient in SciPy versus IDL (why?)
#avoid if simulated images -- not necessary if we don't expect awful pixels
self.filter_segmap = False
#final input for lotzmorph
self.galaxy_segmap = np.where(self.segmap==self.clabel,self.segmap,np.zeros_like(self.segmap))
#final image masks other objects
self.galaxy_image = np.where(np.logical_or(self.segmap==self.clabel,self.segmap==0),self.image,np.zeros_like(self.image))
#number of pixels in image
self.npix = self.image.shape[0]
xi = np.float32(np.arange(self.npix))+0.50 #center locations of pixels
self.pixel_xpos,self.pixel_ypos = np.meshgrid(xi,xi)
#psf in arcsec
self.psf_fwhm_arcsec = 1.4 #data_hdu.header['APROXPSF']
#scale = pixel size in arcsec
self.pixelscale_arcsec = np.abs( data_hdu.header['CD1_1']*arcsec_per_radian() )
self.psf_fwhm_pixels = self.psf_fwhm_arcsec/self.pixelscale_arcsec
#physical scale in kpc, for funsies
self.kpc_per_arcsec = None #data_hdu.header['PSCALE']
#sky = background level in image
self.sky = 0.0 #data_hdu.header['SKY']
#x and y positions. MUST CONFIRM PYTHON ORDERING/locations, 0,1 as x,y seem ok for now
self.xcentroid = se_catalog['Y_IMAGE']-new_ymin #segmap_hdu.header['POS0']
self.ycentroid = se_catalog['X_IMAGE']-new_xmin #segmap_hdu.header['POS1']
self.thisband_xcentroid = self.ycentroid*1.0-new_ymin #photutils_hdu.header['XCENTR']
self.thisband_ycentroid = self.xcentroid*1.0-new_xmin #photutils_hdu.header['YCENTR']
#a/b I'm guessing this is the elongation parameter?
self.elongation = se_catalog['ELONGATION']
assert (self.elongation > 0.0)
#PA position angle. WHAT UNITS?
self.pa_radians = se_catalog['THETA_IMAGE']*(math.pi/180.0) #this looks like it's in radians, counterclockwise (photutils)
#skybox. do we need this if we know skysig?
self.skysig = 1.0 #data_hdu.header['SKYSIG']
#create arbitrary perfect noise image matching synthetic image properties
#this is okay if noise is perfectly uniform gaussian right?
self.skybox = self.skysig*np.random.randn(50,50)
bc1 = float(self.skybox.shape[0]-1)/2.0
bc2 = float(self.skybox.shape[1]-1)/2.0
self.rot_skybox = skimage.transform.rotate(self.skybox,180.0,center=(bc1,bc2),mode='constant',cval=0.0,preserve_range=True)
#AB magnitude best ... "observed" ? aperture mags? segment mags?
#self.magtot_intrinsic = data_hdu.header['MAG']
#self.magtot_observed = data_hdu.header['NEWMAG'] #-1 = bad
self.magseg = se_catalog['MAG_AUTO'] #-1 = bad
self.magseg_err = se_catalog['MAGERR_AUTO'] #-1 = bad
#distance modulus
self.dm = None #data_hdu.header['DISTMOD']
#redshift, because why not
self.redshift = None #data_hdu.header['REDSHIFT']
#rproj (arcsec)
self.rproj_pix = 5.0 #photutils_hdu.header['EQ_RAD'] #pixels
self.rproj_arcsec = self.rproj_pix*self.pixelscale_arcsec
#AB magnitude zeropoint
self.abzp = None #data_hdu.header['ABZP']
self.m_a = 0.0
dummy_array = np.asarray([0.0])
self.morph_hdu = pyfits.ImageHDU(dummy_array)
self.morph_hdu.header['Image']=('dummy', 'What data does this image contain?')
self.morph_hdu.header['DESC']=(self.description)
self.morph_hdu.header['TYPE']=(self.morphtype,'What kind of image was analyzed?')
self.morph_hdu.header['Date']=(datetime.datetime.now().date().isoformat())
return self
def compute_scale_invariant_moments(self):
scale_inv_moments = np.zeros_like(self.photutils_central_moments)
mu_00 = self.photutils_central_moments[0,0]
for i in np.arange(4):
for j in np.arange(4):
scale_inv_moments[i,j]=self.photutils_central_moments[i,j]/(mu_00**(1.0+(float(i)+float(j))/2.0))
self.scale_invariant_moments = scale_inv_moments
return scale_inv_moments
def compute_fs93_moments(self):
mu = self.photutils_central_moments
self.fs93_I1 = (mu[2,0]*mu[0,2] - mu[1,1]**2)/(mu[0,0]**4)
self.fs93_I2 = ((mu[3,0]**2)*(mu[0,3]**2) - 6.0*mu[3,0]*mu[2,1]*mu[1,2]*mu[0,3] + 4.0*mu[3,0]*(mu[1,2]**3) + 4.0*(mu[2,1]**3)*mu[0,3] - 3.0*(mu[2,1]**2)*(mu[1,2]**2))/(mu[0,0]**10)
self.fs93_I3 = ( mu[2,0]*(mu[2,1]*mu[0,3] - mu[1,2]**2) - mu[1,1]*(mu[3,0]*mu[0,3]-mu[2,1]*mu[1,2]) + mu[0,2]*(mu[3,0]*mu[1,2] - mu[2,1]**2))/(mu[0,0]**7)
self.fs93_I4 = ((mu[2,0]**3)*(mu[0,3]**2) - \
6.0*(mu[2,0]**2)*mu[1,1]*mu[1,2]*mu[0,3] - \
6.0*(mu[2,0]**2)*mu[0,2]*mu[2,1]*mu[0,3] + \
9.0*(mu[2,0]**2)*mu[0,2]*(mu[1,2]**2) + \
12.0*mu[2,0]*(mu[1,1]**2)*mu[2,1]*mu[0,3] + \
6.0*mu[2,0]*mu[1,1]*mu[0,2]*mu[3,0]*mu[0,3] - \
18.0*mu[2,0]*mu[1,1]*mu[0,2]*mu[2,1]*mu[1,2] - \
8.0*(mu[1,1]**3)*mu[3,0]*mu[0,3] - \
6.0*mu[2,0]*(mu[0,2]**2)*mu[3,0]*mu[1,2] + \
9.0*mu[2,0]*(mu[0,2]**2)*(mu[2,1]**2) + \
12.0*(mu[1,1]**2)*mu[0,2]*mu[3,0]*mu[1,2] - \
6.0*mu[1,1]*(mu[0,2]**2)*mu[3,0]*mu[2,1] + \
(mu[0,2]**3)*(mu[3,0]**2))/(mu[0,0]**11)
self.fs93_moments = np.asarray([self.fs93_I1,self.fs93_I2,self.fs93_I3,self.fs93_I4])
return self.fs93_moments
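#Note on the expressions above: I1-I4 are invariant under affine maps
#x' = A x + b, and the powers of mu[0,0] in the denominators (4, 10, 7, 11)
#normalize away overall scale. They are not blur-invariant, so PSF
#differences between images can still shift them.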
def compute_hu_moments(self):
ssim = self.scale_invariant_moments
self.hu_I1 = ssim[2,0] + ssim[0,2]
self.hu_I2 = (ssim[2,0]-ssim[0,2])**2 + 4.0*ssim[1,1]**2
self.hu_I3 = (ssim[3,0]-3.0*ssim[1,2])**2 + (3.0*ssim[2,1]-ssim[0,3])**2
self.hu_I4 = (ssim[3,0]+ssim[1,2])**2 + (ssim[2,1]+ssim[0,3])**2
self.hu_I5 = (ssim[3,0]-3.0*ssim[1,2])*(ssim[3,0]+ssim[1,2])*((ssim[3,0]+ssim[1,2])**2 - 3.0*(ssim[2,1]+ssim[0,3])**2)+\
(3.0*ssim[2,1]-ssim[0,3])*(ssim[2,1]+ssim[0,3])*(3.0*(ssim[3,0]+ssim[1,2])**2-(ssim[2,1]+ssim[0,3])**2)
self.hu_I6 = (ssim[2,0]-ssim[0,2])*( (ssim[3,0]+ssim[1,2])**2 - (ssim[2,1]+ssim[0,3])**2 ) + 4.0*ssim[1,1]*(ssim[3,0]+ssim[1,2])*(ssim[2,1]+ssim[0,3])
self.hu_I7 = (3.0*ssim[2,1]-ssim[0,3])*(ssim[3,0]+ssim[1,2])*((ssim[3,0]+ssim[1,2])**2 - 3.0*(ssim[2,1]+ssim[0,3])**2)-\
(ssim[3,0]-3.0*ssim[1,2])*(ssim[2,1]+ssim[0,3])*(3.0*(ssim[3,0]+ssim[1,2])**2-(ssim[2,1]+ssim[0,3])**2)
self.hu_I8 = ssim[1,1]*((ssim[3,0]+ssim[1,2])**2 - (ssim[2,1]+ssim[0,3])**2) - (ssim[2,0]-ssim[0,2])*(ssim[3,0]+ssim[1,2])*(ssim[2,1]+ssim[0,3])
self.hu_moments = np.asarray([self.hu_I1,self.hu_I2,self.hu_I3,self.hu_I4,self.hu_I5,self.hu_I6,self.hu_I7,self.hu_I8])
return self.hu_moments
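#Quick cross-check sketch (hypothetical, not wired into the class): skimage
#implements the same moment chain, so values computed from a raw image there
#should agree with the ones derived here from photutils central moments.
# from skimage import measure
# mu = measure.moments_central(image, order=3) #central moments to 3rd order
# nu = measure.moments_normalized(mu, order=3) #scale-invariant moments
# hu = measure.moments_hu(nu) #Hu's 7 rotation-invariant moments
#skimage stops at Hu's original seven; hu_I8 (Flusser's extra moment above)
#has no skimage counterpart.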
def lotz_central_moments(self, i_list, j_list, xc=None, yc=None):
new_image = np.where(self.segmap==self.clabel,self.image,np.zeros_like(self.image))
mu_ij = central_moments(new_image,i_list,j_list,xc=xc,yc=yc)
return np.asarray(mu_ij)
def return_measurement_HDU(self):
self.morph_hdu.header['Status']=(self.lotz_morph_status)
if 'Error' in self.lotz_morph_status:
self.morph_hdu.header['FLAG']=(1,'Indicates error in morphology status')
self.flag = 1
else:
self.morph_hdu.header['FLAG']=(0,'Normal completion')
self.flag = 0
self.morph_hdu.header['M_A'] = (self.m_a,'Log10 sum of 3rd-order scale+ invariant moments')
if getattr(self, 'hu_moments', None) is not None:
self.morph_hdu.header['Hu_I1']=(self.hu_I1,'1st rotation invariant moment (Hu 1962)')
self.morph_hdu.header['Hu_I2']=(self.hu_I2,'2nd rotation invariant moment (Hu 1962)')
self.morph_hdu.header['Hu_I3']=(self.hu_I3,'3rd rotation invariant moment (Hu 1962)')
self.morph_hdu.header['Hu_I4']=(self.hu_I4,'4th rotation invariant moment (Hu 1962)')
self.morph_hdu.header['Hu_I5']=(self.hu_I5,'5th rotation invariant moment (Hu 1962)')
self.morph_hdu.header['Hu_I6']=(self.hu_I6,'6th rotation invariant moment (Hu 1962)')
self.morph_hdu.header['Hu_I7']=(self.hu_I7,'7th rotation invariant moment (Hu 1962)')
self.morph_hdu.header['Hu_I8']=(self.hu_I8,'8th rotation invariant moment (Hu 1962)')
if getattr(self, 'fs93_moments', None) is not None:
self.morph_hdu.header['FS93_I1']=(self.fs93_I1,'1st affine-invariant moment (Flusser & Suk 93)')
self.morph_hdu.header['FS93_I2']=(self.fs93_I2,'2nd affine-invariant moment (Flusser & Suk 93)')
self.morph_hdu.header['FS93_I3']=(self.fs93_I3,'3rd affine-invariant moment (Flusser & Suk 93)')
self.morph_hdu.header['FS93_I4']=(self.fs93_I4,'4th affine-invariant moment (Flusser & Suk 93)')
self.morph_hdu.header['M_I2']=(self.m_I2,'Log10 Magnitude of FS93_I2')
return self.morph_hdu
def return_rpa_segmap_hdu(self):
if self.petro_segmap is not None:
rpa_seg_hdu = pyfits.ImageHDU(self.petro_segmap)
rpa_seg_hdu.header['EXTNAME']='APSEGMAP'
return rpa_seg_hdu
else:
return None
def write_idl_input_line(self,idl_filename):
fo = open(idl_filename,'a')
fitsfn = self.imagefile
ababszp = self.abzp
dm_im = self.dm
apparent_mag = self.magseg
absolute_mag = apparent_mag - dm_im
me = self.magseg_err
npix = (self.galaxy_image).shape[0]
center = int(float(npix)/2.0)
psfval=self.psf_fwhm_arcsec
scaleval = self.pixelscale_arcsec
ab = self.elongation
#idl input in degrees, apparently, also +90deg ?
pa = (180.0/math.pi)*self.pa_radians + 90.0
xc = int(self.xcentroid)
yc = int(self.ycentroid)
rproj = self.rproj_arcsec
input_string = '{:80s}{:8d}{:10.3f}{:10.3f}{:10.3f}{:8d}{:8d}{:10.3f}{:7.1f}{:5.1f}{:5.1f}{:5.1f}{:5.1f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}\n'.format(fitsfn,npix,psfval,scaleval,0.0,xc,yc,
ab,pa,1.0,21.0,1.0,21.0,absolute_mag,me,dm_im,rproj,ababszp)
fo.write(input_string)
fo.close()
return
def write_py_output_line(self,python_outfile):
fo = open(python_outfile,'a')
fitsfn = self.imagefile
pa = (180.0/math.pi)*self.pa_radians + 90.0
#Galaxy DM dRproj" ABMag ABMager <S/N> R(1/2)c R(1/2)e R_pet_c R_pet_e AB PA A_XC A_YC M_XC M_YC C r_20 r_80 Asym S Gini M_20 Flag Cnts
if self.morph_hdu.header['CFLAG'] != 1:
output_string = '{:80s}{:10.3f}{:10.3f}{:10.3f}{:10.3f}'\
'{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}'\
'{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}'\
'{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}\n'.format(fitsfn,self.dm,self.rproj_arcsec,
self.magseg,self.magseg_err,
self.snpix,0.0,0.0,self.rp_circ_2,self.rp_ellip,self.elongation,pa,
self.xcen_a2,self.ycen_a2,self.mxc,self.myc,self.cc,self.r20,self.r80,
self.asym2,0.0,self.gini,self.m20,self.flag,0.0)
else:
output_string = '{:80s}{:10.3f}{:10.3f}{:10.3f}{:10.3f}'\
'{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}'\
'{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}'\
'{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}{:10.3f}\n'.format(fitsfn,self.dm,self.rproj_arcsec,
self.magseg,self.magseg_err,
self.snpix,0.0,0.0,self.rp_circ_2,self.rp_ellip,self.elongation,pa,
self.xcen_a2,self.ycen_a2,self.mxc,self.myc,-1,-1,-1,
self.asym2,0.0,self.gini,self.m20,self.cflag,0.0)
fo.write(output_string)
fo.close()
return
#input galaxy image HDU, segmap HDU, and photutils info HDU,
#return FITS HDU containing non-parametric morphology measurements either in the header or a FITS table HDU
def morph_from_synthetic_image(data_hdu,segmap_hdu,photutils_hdu,cm_hdu,extname='LotzMorphMeasurements',idl_filename=None,python_outfile=None,outobject=None):
#unpack HDUs and send to generic Lotz morphology code
galdataobject = galdata()
galdataobject = galdataobject.init_from_synthetic_image(data_hdu,segmap_hdu,photutils_hdu,cm_hdu)
result = galdataobject.run_lotz_morphs()
morph_hdu = galdataobject.return_measurement_HDU()
morph_hdu.header['EXTNAME']=extname
rpa_seg_hdu = galdataobject.return_rpa_segmap_hdu()
if idl_filename is not None and galdataobject.flag==0:
galdataobject.write_idl_input_line(idl_filename)
#also write output files?
if python_outfile is not None and galdataobject.flag==0:
galdataobject.write_py_output_line(python_outfile)
outobject = copy.copy(galdataobject)
return morph_hdu, rpa_seg_hdu
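#Illustrative call (the FITS extension names here are assumptions, not a
#documented data product layout):
# hdulist = pyfits.open('synthetic_image.fits')
# morph_hdu, rpa_seg_hdu = morph_from_synthetic_image(
# hdulist['IMAGE'], hdulist['SEGMAP'], hdulist['PHOTUTILS'],
# hdulist['CMOMENTS'], python_outfile='morph_catalog.txt')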
#work-in-progress
def morph_from_panstarrs_image(image_hdu,weight_hdu,segmap_hdu,se_catalog,extname='StatMorphMeasurements',idl_filename=None,python_outfile=None,outobject=None):
galdataobject = galdata()
galdataobject = galdataobject.init_from_panstarrs_image(image_hdu,weight_hdu,segmap_hdu,se_catalog)
result = galdataobject.run_lotz_morphs()
morph_hdu = galdataobject.return_measurement_HDU()
morph_hdu.header['EXTNAME']=extname
rpa_seg_hdu = galdataobject.return_rpa_segmap_hdu()
if idl_filename is not None and galdataobject.flag==0:
galdataobject.write_idl_input_line(idl_filename)
#also write output files?
if python_outfile is not None and galdataobject.flag==0:
galdataobject.write_py_output_line(python_outfile)
outobject = copy.copy(galdataobject)
return morph_hdu, rpa_seg_hdu, galdataobject
|
gsnyder206/synthetic-image-morph
|
statmorph_gfs.py
|
Python
|
gpl-2.0
| 82,960
|
[
"Galaxy",
"Gaussian"
] |
853f5b8f127ffe3ac100bef1656f11ad79001a28fbc7399ab2543c2d0026ae9a
|
""" This is a test of using WMSClient and several other functions in WMS
In order to run this test we need the following DBs installed:
- JobDB
- JobLoggingDB
- TaskQueueDB
- SandboxMetadataDB
And the following services should also be on:
- OptimizationMind
- JobManager
- SandboxStore
- JobMonitoring
- JobStateUpdate
- WMSAdministrator
- Matcher
A user proxy is also needed to submit,
and the Framework/ProxyManager service needs to be running with such a user proxy already uploaded.
Due to the nature of the DIRAC WMS, only a full chain test makes sense,
and this also means that this test is not easy to set up.
"""
# pylint: disable=protected-access,wrong-import-position,invalid-name
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import unittest
import sys
import datetime
import time
# from mock import Mock
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.tests.Utilities.WMS import helloWorldJob, parametricJob, createFile
from DIRAC import gLogger
from DIRAC.Interfaces.API.Job import Job
from DIRAC.WorkloadManagementSystem.Client import JobStatus
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
from DIRAC.WorkloadManagementSystem.Client.WMSAdministratorClient import WMSAdministratorClient
from DIRAC.WorkloadManagementSystem.Client.MatcherClient import MatcherClient
from DIRAC.WorkloadManagementSystem.Agent.JobCleaningAgent import JobCleaningAgent
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
class TestWMSTestCase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
gLogger.setLevel('DEBUG')
def tearDown(self):
""" use the JobCleaningAgent method to remove the jobs in status Deleted and Killed
"""
jca = JobCleaningAgent('WorkloadManagement/JobCleaningAgent',
'WorkloadManagement/JobCleaningAgent')
jca.initialize()
res = jca.removeJobsByStatus({'Status': [JobStatus.KILLED, JobStatus.DELETED]})
self.assertTrue(res['OK'], res.get('Message'))
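# Every client call below returns the DIRAC S_OK/S_ERROR structure: a dict
# with res['OK'] and, on success, res['Value'], otherwise res['Message'].
# A small helper sketch (illustrative only, not used by these tests):
# def assertDiracOK(self, res):
#     self.assertTrue(res['OK'], res.get('Message'))
#     return res.get('Value')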
class WMSChain(TestWMSTestCase):
def test_FullChain(self):
""" This test will
- call all the WMSClient methods
that will end up calling all the JobManager service methods
- use the JobMonitoring to verify few properties
- call the JobCleaningAgent to eliminate job entries from the DBs
"""
wmsClient = WMSClient()
jobMonitor = JobMonitoringClient()
jobStateUpdate = JobStateUpdateClient()
# create the job
job = helloWorldJob()
jobDescription = createFile(job)
# submit the job
res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
self.assertTrue(res['OK'], res.get('Message'))
self.assertTrue(isinstance(res['Value'], int), msg="Got %s" % type(res['Value']))
self.assertEqual(res['Value'], res['JobID'],
msg="Got %s, expected %s" % (str(res['Value']), res['JobID']))
jobID = res['Value']
# updating the status
res = jobStateUpdate.setJobStatus(jobID, JobStatus.RUNNING, 'Executing Minchiapp', 'source')
self.assertTrue(res['OK'], res.get('Message'))
# reset the job
res = wmsClient.resetJob(jobID)
self.assertTrue(res['OK'], res.get('Message'))
# reschedule the job
res = wmsClient.rescheduleJob(jobID)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobsStatus(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'][jobID]['Status'], JobStatus.RECEIVED, msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobsMinorStatus([jobID])
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], {jobID: {'MinorStatus': 'Job Rescheduled'}},
msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobsApplicationStatus([jobID])
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], {jobID: {'ApplicationStatus': 'Unknown'}},
msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobsStates([jobID])
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], {jobID: {'Status': JobStatus.RECEIVED,
'MinorStatus': 'Job Rescheduled',
'ApplicationStatus': 'Unknown'}},
msg="Got %s" % str(res['Value']))
# updating the status again
res = jobStateUpdate.setJobStatus(jobID, JobStatus.CHECKING, 'checking', 'source')
self.assertTrue(res['OK'], res.get('Message'))
res = jobStateUpdate.setJobStatus(jobID, JobStatus.WAITING, 'waiting', 'source')
self.assertTrue(res['OK'], res.get('Message'))
res = jobStateUpdate.setJobStatus(jobID, JobStatus.MATCHED, 'matched', 'source')
self.assertTrue(res['OK'], res.get('Message'))
# kill the job
res = wmsClient.killJob(jobID)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobsStatus(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'][jobID]['Status'], JobStatus.KILLED, msg="Got %s" % str(res['Value']))
# delete the job - this will just set its status to "deleted"
res = wmsClient.deleteJob(jobID)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobsStatus(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'][jobID]['Status'], JobStatus.DELETED, msg="Got %s" % str(res['Value']))
def test_ParametricChain(self):
""" This test will submit a parametric job which should generate 3 actual jobs
"""
wmsClient = WMSClient()
jobStateUpdate = JobStateUpdateClient()
jobMonitor = JobMonitoringClient()
# create the job
job = parametricJob()
jobDescription = createFile(job)
# submit the job
res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
self.assertTrue(res['OK'], res.get('Message'))
jobIDList = res['Value']
self.assertEqual(len(jobIDList), 3, msg="Got %s" % str(jobIDList))
res = jobMonitor.getJobsParameters(jobIDList, ['JobName'])
self.assertTrue(res['OK'], res.get('Message'))
jobNames = [res['Value'][jobID]['JobName'] for jobID in res['Value']]
self.assertEqual(set(jobNames), set(['parametric_helloWorld_%s' % nJob for nJob in range(3)]))
for jobID in jobIDList:
res = jobStateUpdate.setJobStatus(jobID, JobStatus.CHECKING, 'checking', 'source')
self.assertTrue(res['OK'], res.get('Message'))
res = wmsClient.deleteJob(jobIDList)
self.assertTrue(res['OK'], res.get('Message'))
print(res)
for jobID in jobIDList:
res = jobMonitor.getJobsStatus(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'][jobID]['Status'], JobStatus.DELETED, msg="Got %s" % str(res['Value']))
class JobMonitoring(TestWMSTestCase):
def test_JobStateUpdateAndJobMonitoring(self):
""" Verifying all JobStateUpdate and JobMonitoring functions
"""
wmsClient = WMSClient()
jobMonitor = JobMonitoringClient()
jobStateUpdate = JobStateUpdateClient()
# create a job and check stuff
job = helloWorldJob()
jobDescription = createFile(job)
# submitting the job, then checking a few things
res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
self.assertTrue(res['OK'], res.get('Message'))
jobID = int(res['Value'])
# jobID = res['JobID']
res = jobMonitor.getJobJDL(jobID, True)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobJDL(jobID, False)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobsParameters([jobID], [])
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobOwner(jobID)
self.assertTrue(res['OK'], res.get('Message'))
# Adding stuff
# forcing the update
res = jobStateUpdate.setJobStatus(jobID, JobStatus.RUNNING, 'running', 'source', None, True)
self.assertTrue(res['OK'], res.get('Message'))
res = jobStateUpdate.setJobParameters(jobID, [('par1', 'par1Value'), ('par2', 'par2Value')])
time.sleep(5)
self.assertTrue(res['OK'], res.get('Message'))
res = jobStateUpdate.setJobApplicationStatus(jobID, 'app status', 'source')
self.assertTrue(res['OK'], res.get('Message'))
# res = jobStateUpdate.setJobFlag()
# self.assertTrue(res['OK'], res.get('Message'))
# res = jobStateUpdate.unsetJobFlag()
# self.assertTrue(res['OK'], res.get('Message'))
res = jobStateUpdate.setJobSite(jobID, 'Site')
self.assertTrue(res['OK'], res.get('Message'))
# now checking a few things
res = jobMonitor.getJobsStatus(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'][jobID]['Status'], JobStatus.RUNNING, msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobParameter(jobID, 'par1')
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], {'par1': 'par1Value'}, msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobParameters(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], {jobID: {'par1': 'par1Value', 'par2': 'par2Value'}},
msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobParameters(jobID, 'par1')
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], {jobID: {'par1': 'par1Value'}},
msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobAttribute(jobID, 'Site')
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], 'Site', msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobAttributes(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['ApplicationStatus'], 'app status',
msg="Got %s" % str(res['Value']['ApplicationStatus']))
self.assertEqual(res['Value']['JobName'], 'helloWorld',
msg="Got %s" % str(res['Value']['JobName']))
res = jobMonitor.getJobSummary(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['ApplicationStatus'], 'app status',
msg="Got %s" % str(res['Value']['ApplicationStatus']))
self.assertEqual(res['Value']['Status'], JobStatus.RUNNING,
msg="Got %s" % str(res['Value']['Status']))
res = jobMonitor.getJobHeartBeatData(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], [], msg="Got %s" % str(res['Value']))
res = jobMonitor.getInputData(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], [], msg="Got %s" % str(res['Value']))
res = jobMonitor.getJobSummary(jobID)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getAtticJobParameters(jobID)
self.assertTrue(res['OK'], res.get('Message'))
res = jobStateUpdate.setJobStatus(jobID, JobStatus.DONE, 'MinorStatus', 'Unknown')
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobSummary(jobID)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['Status'], JobStatus.DONE, msg="Got %s" % str(res['Value']['Status']))
self.assertEqual(res['Value']['MinorStatus'], 'MinorStatus',
msg="Got %s" % str(res['Value']['MinorStatus']))
self.assertEqual(res['Value']['ApplicationStatus'], 'app status',
msg="Got %s" % str(res['Value']['ApplicationStatus']))
res = jobStateUpdate.sendHeartBeat(jobID, {'bih': 'bih'}, {'boh': 'boh'})
self.assertTrue(res['OK'], res.get('Message'))
# delete the job - this will just set its status to "deleted"
wmsClient.deleteJob(jobID)
# # Adding a platform
# self.getDIRACPlatformMock.return_value = {'OK': False}
#
# job = helloWorldJob()
# job.setPlatform( "x86_64-slc6" )
#
# jobDescription = createFile( job )
#
# job.setCPUTime( 17800 )
# job.setBannedSites( ['LCG.CERN.ch', 'LCG.CNAF.it', 'LCG.GRIDKA.de', 'LCG.IN2P3.fr',
# 'LCG.NIKHEF.nl', 'LCG.PIC.es', 'LCG.RAL.uk', 'LCG.SARA.nl'] )
# res = WMSClient().submitJob( job._toJDL( xmlFile = jobDescription ) )
# self.assertTrue(res['OK'], res.get('Message'))
# self.assertEqual( type( res['Value'] ), int )
class JobMonitoringMore(TestWMSTestCase):
def test_JobStateUpdateAndJobMonitoringMultiple(self):
""" Now, let's submit some jobs: different sites, types, inputs
"""
wmsClient = WMSClient()
jobMonitor = JobMonitoringClient()
jobStateUpdate = JobStateUpdateClient()
jobIDs = []
lfnss = [['/a/1.txt', '/a/2.txt'], ['/a/1.txt', '/a/3.txt', '/a/4.txt'], []]
types = ['User', 'Test']
for lfns in lfnss:
for jobType in types:
job = helloWorldJob()
job.setDestination('DIRAC.Jenkins.ch')
job.setInputData(lfns)
job.setType(jobType)
jobDescription = createFile(job)
res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
self.assertTrue(res['OK'], res.get('Message'))
jobID = res['Value']
jobIDs.append(jobID)
res = jobMonitor.getSites()
print(res)
self.assertTrue(res['OK'], res.get('Message'))
self.assertTrue(set(res['Value']) <= {'ANY', 'DIRAC.Jenkins.ch', 'Site'}, msg="Got %s" % res['Value'])
res = jobMonitor.getJobTypes()
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(sorted(res['Value']), sorted(types), msg="Got %s" % str(sorted(res['Value'])))
res = jobMonitor.getApplicationStates()
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], ['Unknown'], msg="Got %s" % str(res['Value']))
res = jobMonitor.getOwners()
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getOwnerGroup()
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getProductionIds()
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobGroups()
self.assertTrue(res['OK'], res.get('Message'))
resJG_empty = res['Value']
res = jobMonitor.getJobGroups(None, datetime.datetime.utcnow())
self.assertTrue(res['OK'], res.get('Message'))
resJG_olderThanNow = res['Value']
self.assertEqual(resJG_empty, resJG_olderThanNow)
res = jobMonitor.getJobGroups(None, datetime.datetime.utcnow() - datetime.timedelta(days=365))
self.assertTrue(res['OK'], res.get('Message'))
resJG_olderThanOneYear = res['Value']
self.assertTrue(set(resJG_olderThanOneYear).issubset(set(resJG_olderThanNow)),
resJG_olderThanOneYear)
res = jobMonitor.getStates()
self.assertTrue(res['OK'], res.get('Message'))
self.assertTrue(sorted(res['Value']) in [[JobStatus.RECEIVED], sorted([JobStatus.RECEIVED, JobStatus.WAITING])],
res['Value'])
res = jobMonitor.getMinorStates()
self.assertTrue(res['OK'], res.get('Message'))
self.assertTrue(sorted(res['Value']) in [
['Job accepted'],
sorted(['Job accepted', 'Job Rescheduled'])],
res['Value'])
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobs()
self.assertTrue(res['OK'], res.get('Message'))
self.assertTrue(set([str(x) for x in jobIDs]) <= set(res['Value']),
res['Value'])
# res = jobMonitor.getCounters(attrList)
# self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobsSummary(jobIDs)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobPageSummaryWeb({}, [], 0, 100)
self.assertFalse(res['OK'], res.get('Value'))
res = jobStateUpdate.setJobStatusBulk(
jobID,
{str(datetime.datetime.utcnow()): {
'Status': JobStatus.CHECKING,
'MinorStatus': 'MinorStatus',
'Source': 'Unknown'}},
False
)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobSummary(int(jobID))
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['Status'], JobStatus.CHECKING)
self.assertEqual(res['Value']['MinorStatus'], 'MinorStatus')
res = jobStateUpdate.setJobStatusBulk(
jobID,
{str(datetime.datetime.utcnow() + datetime.timedelta(hours=1)): {
'Status': JobStatus.WAITING,
'MinorStatus': 'MinorStatus',
'Source': 'Unknown'},
str(datetime.datetime.utcnow() + datetime.timedelta(hours=2)): {
'Status': JobStatus.MATCHED,
'MinorStatus': 'MinorStatus-matched',
'Source': 'Unknown'}},
False
)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobSummary(int(jobID))
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['Status'], JobStatus.MATCHED)
self.assertEqual(res['Value']['MinorStatus'], 'MinorStatus-matched')
res = jobStateUpdate.setJobsParameter({jobID: ['Whatever', 'booh']})
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobSummary(int(jobID))
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['Status'], JobStatus.MATCHED)
self.assertEqual(res['Value']['MinorStatus'], 'MinorStatus-matched')
res = jobStateUpdate.setJobAttribute(jobID, 'Status', JobStatus.RUNNING)
self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobSummary(int(jobID))
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['Status'], JobStatus.RUNNING)
# delete the jobs - this will just set its status to "deleted"
wmsClient.deleteJob(jobIDs)
# def test_submitFail( self ):
#
# # Adding a platform that should not exist
# job = helloWorldJob()
# job.setPlatform( "notExistingPlatform" )
# jobDescription = createFile( job )
#
# res = WMSClient().submitJob( job._toJDL( xmlFile = jobDescription ) )
# self.assertTrue(res['OK'], res.get('Message'))
#
# WMSClient().deleteJob( res['Value'] )
class WMSAdministrator(TestWMSTestCase):
""" testing WMSAdmin - for JobDB
"""
def test_JobDBWMSAdmin(self):
wmsAdministrator = WMSAdministratorClient()
sitesList = ['My.Site.org', 'Your.Site.org']
res = wmsAdministrator.setSiteMask(sitesList)
self.assertTrue(res['OK'], res.get('Message'))
res = wmsAdministrator.getSiteMask()
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(sorted(res['Value']), sorted(sitesList), msg="Got %s" % str(sorted(res['Value'])))
res = wmsAdministrator.banSite('My.Site.org', 'This is a comment')
self.assertTrue(res['OK'], res.get('Message'))
res = wmsAdministrator.getSiteMask()
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(sorted(res['Value']), ['Your.Site.org'], msg="Got %s" % str(sorted(res['Value'])))
res = wmsAdministrator.allowSite('My.Site.org', 'This is a comment')
self.assertTrue(res['OK'], res.get('Message'))
res = wmsAdministrator.getSiteMask()
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(sorted(res['Value']), sorted(sitesList), msg="Got %s" % str(sorted(res['Value'])))
res = wmsAdministrator.getSiteMaskLogging(sitesList)
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['My.Site.org'][0][3], 'No comment',
msg="Got %s" % str(res['Value']['My.Site.org'][0][3]))
res = wmsAdministrator.getSiteMaskSummary()
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value']['My.Site.org'], 'Active', msg="Got %s" % res['Value']['My.Site.org'])
res = wmsAdministrator.getSiteSummaryWeb({}, [], 0, 100)
self.assertTrue(res['OK'], res.get('Message'))
self.assertTrue(res['Value']['TotalRecords'] in [0, 1, 2, 34])
res = wmsAdministrator.getSiteSummarySelectors()
self.assertTrue(res['OK'], res.get('Message'))
res = wmsAdministrator.clearMask()
self.assertTrue(res['OK'], res.get('Message'))
res = wmsAdministrator.getSiteMask()
self.assertTrue(res['OK'], res.get('Message'))
self.assertEqual(res['Value'], [], msg="Got %s" % str(res['Value']))
class Matcher(TestWMSTestCase):
"""Testing Matcher
"""
def test_matcher(self):
# insert a proper DN to run the test
resourceDescription = {
'OwnerGroup': 'prod',
'OwnerDN': '/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser',
'DIRACVersion': 'pippo',
'ReleaseVersion': 'blabla',
'VirtualOrganization': 'LHCb',
'PilotInfoReportedFlag': 'True',
'PilotBenchmark': 'anotherPilot',
'Site': 'DIRAC.Jenkins.ch',
'CPUTime': 86400}
wmsClient = WMSClient()
job = helloWorldJob()
job.setDestination('DIRAC.Jenkins.ch')
job.setInputData('/a/bbb')
job.setType('User')
jobDescription = createFile(job)
res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
self.assertTrue(res['OK'], res.get('Message'))
jobID = res['Value']
# forcing the update
res = JobStateUpdateClient().setJobStatus(jobID, JobStatus.WAITING, 'matching', 'source', None, True)
self.assertTrue(res['OK'], res.get('Message'))
tqDB = TaskQueueDB()
tqDefDict = {'OwnerDN': '/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser',
'OwnerGroup': 'prod', 'Setup': 'dirac-JenkinsSetup', 'CPUTime': 86400}
res = tqDB.insertJob(jobID, tqDefDict, 10)
self.assertTrue(res['OK'], res.get('Message'))
res = MatcherClient().requestJob(resourceDescription)
print(res)
self.assertTrue(res['OK'], res.get('Message'))
wmsClient.deleteJob(jobID)
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestWMSTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(WMSChain))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(JobMonitoring))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(JobMonitoringMore))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(WMSAdministrator))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(Matcher))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
|
yujikato/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_Client_WMS.py
|
Python
|
gpl-3.0
| 22,709
|
[
"DIRAC"
] |
01405d63fe474adabaa2b4fd59d96371e7ea8385c9ae1ba0e2fbb1c50c2f167a
|
#!/usr/bin/env python
import bz2
import gzip
import json
import optparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from ftplib import FTP
CHUNK_SIZE = 2**20 # 1mb
def cleanup_before_exit(tmp_dir):
if tmp_dir and os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def _get_files_in_ftp_path(ftp, path):
path_contents = []
ftp.retrlines('MLSD %s' % (path), path_contents.append)
return [line.split(';')[-1].lstrip() for line in path_contents]
def _get_stream_readers_for_tar(file_obj, tmp_dir):
fasta_tar = tarfile.open(fileobj=file_obj, mode='r:*')
return [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()]
def _get_stream_readers_for_zip(file_obj, tmp_dir):
fasta_zip = zipfile.ZipFile(file_obj, 'r')
rval = []
for member in fasta_zip.namelist():
fasta_zip.extract(member, tmp_dir)
rval.append(open(os.path.join(tmp_dir, member), 'rb'))
return rval
def _get_stream_readers_for_gzip(file_obj, tmp_dir):
return [gzip.GzipFile(fileobj=file_obj, mode='rb')]
def _get_stream_readers_for_bz2(file_obj, tmp_dir):
return [bz2.BZ2File(file_obj.name, 'rb')]
def download_from_ncbi(data_manager_dict, params, target_directory,
database_id, database_name):
NCBI_FTP_SERVER = 'ftp.ncbi.nlm.nih.gov'
NCBI_DOWNLOAD_PATH = '/blast/db/FASTA/'
COMPRESSED_EXTENSIONS = [('.tar.gz', _get_stream_readers_for_tar),
('.tar.bz2', _get_stream_readers_for_tar),
('.zip', _get_stream_readers_for_zip),
('.gz', _get_stream_readers_for_gzip),
('.bz2', _get_stream_readers_for_bz2)]
ncbi_identifier = params['reference_source']['requested_identifier']
ftp = FTP(NCBI_FTP_SERVER)
ftp.login()
path_contents = _get_files_in_ftp_path(ftp, NCBI_DOWNLOAD_PATH)
ncbi_file_name = None
get_stream_reader = None
ext = None
for ext, get_stream_reader in COMPRESSED_EXTENSIONS:
if "%s%s" % (ncbi_identifier, ext) in path_contents:
ncbi_file_name = "%s%s%s" % (NCBI_DOWNLOAD_PATH, ncbi_identifier, ext)
break
if not ncbi_file_name:
raise Exception('Unable to determine filename for NCBI database for %s: %s' % (ncbi_identifier, path_contents))
tmp_dir = tempfile.mkdtemp(prefix='tmp-data-manager-ncbi-')
ncbi_fasta_filename = os.path.join(tmp_dir, "%s%s" % (ncbi_identifier, ext))
# fasta_base_filename = "%s.fa" % database_id
# fasta_filename = os.path.join(target_directory, fasta_base_filename)
# fasta_writer = open(fasta_filename, 'wb+')
tmp_extract_dir = os.path.join(tmp_dir, 'extracted_fasta')
os.mkdir(tmp_extract_dir)
tmp_fasta = open(ncbi_fasta_filename, 'wb+')
ftp.retrbinary('RETR %s' % ncbi_file_name, tmp_fasta.write)
tmp_fasta.flush()
tmp_fasta.seek(0)
fasta_readers = get_stream_reader(tmp_fasta, tmp_extract_dir)
data_table_entry = _stream_fasta_to_file(fasta_readers, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
for fasta_reader in fasta_readers:
fasta_reader.close()
tmp_fasta.close()
cleanup_before_exit(tmp_dir)
def download_from_url(data_manager_dict, params, target_directory, database_id, database_name):
# TODO: we should automatically do decompression here
urls = list(filter(bool, [x.strip() for x in params['reference_source']['user_url'].split('\n')]))
fasta_reader = [urllib.request.urlopen(url) for url in urls]
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def download_from_history(data_manager_dict, params, target_directory, database_id, database_name):
# TODO: allow multiple FASTA input files
input_filename = params['reference_source']['input_fasta']
if isinstance(input_filename, list):
fasta_reader = [open(filename, 'rb') for filename in input_filename]
else:
fasta_reader = open(input_filename, 'rb')
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def copy_from_directory(data_manager_dict, params, target_directory, database_id, database_name):
input_filename = params['reference_source']['fasta_filename']
create_symlink = params['reference_source']['create_symlink'] == 'create_symlink'
if create_symlink:
data_table_entry = _create_symlink(input_filename, target_directory, database_id, database_name)
else:
if isinstance(input_filename, list):
fasta_reader = [open(filename, 'rb') for filename in input_filename]
else:
fasta_reader = open(input_filename, 'rb')
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def _add_data_table_entry(data_manager_dict, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables']['diamond_database'] = data_manager_dict['data_tables'].get('diamond_database', [])
data_manager_dict['data_tables']['diamond_database'].append(data_table_entry)
return data_manager_dict
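# For reference, a minimal sketch of the structure built up here (field
# values are illustrative):
# {'data_tables': {'diamond_database': [
#     {'value': 'nr_2024', 'name': 'NCBI nr', 'db_path': 'nr_2024.fa.dmnd'}]}}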
def _stream_fasta_to_file(fasta_stream, target_directory, database_id,
database_name, params, close_stream=True):
fasta_base_filename = "%s.fa" % database_id
fasta_filename = os.path.join(target_directory, fasta_base_filename)
temp_fasta = tempfile.NamedTemporaryFile(delete=False, suffix=".fasta")
temp_fasta.close()
fasta_writer = open(temp_fasta.name, 'wb+')
if not isinstance(fasta_stream, list):
fasta_stream = [fasta_stream]
last_char = None
for fh in fasta_stream:
# streams here are binary, so under Python 3 data[-1] below is an int:
# compare byte values and write bytes
if last_char not in (None, ord('\n'), ord('\r')):
fasta_writer.write(b'\n')
while True:
data = fh.read(CHUNK_SIZE)
if data:
fasta_writer.write(data)
last_char = data[-1]
else:
break
if close_stream:
fh.close()
fasta_writer.close()
args = ['diamond', 'makedb',
'--in', temp_fasta.name,
'--db', fasta_filename]
if params['tax_cond']['tax_select'] == "history":
for i in ["taxonmap", "taxonnodes", "taxonnames"]:
args.extend(['--' + i, params['tax_cond'][i]])
elif params['tax_cond']['tax_select'] == "ncbi":
if os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL.gz')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL.gz')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.gz')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.gz')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid')])
else:
raise Exception('Unable to find prot.accession2taxid file in %s' % (params['tax_cond']['ncbi_tax']))
args.extend(['--taxonnodes',
os.path.join(params['tax_cond']['ncbi_tax'], 'nodes.dmp')])
args.extend(['--taxonnames',
os.path.join(params['tax_cond']['ncbi_tax'], 'names.dmp')])
tmp_stderr = tempfile.NamedTemporaryFile(prefix="tmp-data-manager-diamond-database-builder-stderr")
proc = subprocess.Popen(args=args, shell=False, cwd=target_directory,
stderr=tmp_stderr.fileno())
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek(0)
print("Error building diamond database:", file=sys.stderr)
while True:
chunk = tmp_stderr.read(CHUNK_SIZE)
if not chunk:
break
sys.stderr.write(chunk.decode('utf-8'))
sys.exit(return_code)
tmp_stderr.close()
os.remove(temp_fasta.name)
return dict(value=database_id, name=database_name,
db_path="%s.dmnd" % fasta_base_filename)
def _create_symlink(input_filename, target_directory, database_id, database_name):
fasta_base_filename = "%s.fa" % database_id
fasta_filename = os.path.join(target_directory, fasta_base_filename)
os.symlink(input_filename, fasta_filename)
return dict(value=database_id, name=database_name, db_path=fasta_base_filename)
REFERENCE_SOURCE_TO_DOWNLOAD = dict(ncbi=download_from_ncbi,
url=download_from_url,
history=download_from_history,
directory=copy_from_directory)
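# Sketch of the JSON document this script consumes, limited to the keys read
# below (values are illustrative, not a full Galaxy job description):
# {
#   "output_data": [{"extra_files_path": "/tmp/dm_out"}],
#   "param_dict": {
#     "database_id": "nr_2024",
#     "database_name": "NCBI nr (2024)",
#     "tax_cond": {"tax_select": "no_taxonomy"},
#     "reference_source": {"reference_source_selector": "url",
#                          "user_url": "https://example.org/proteins.fasta"}
#   }
# }
# When tax_select is "ncbi", the taxonomy directory arrives as the second
# positional argument rather than inside the JSON.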
def main():
# Parse Command Line
parser = optparse.OptionParser()
parser.add_option('-d', '--dbkey_description', dest='dbkey_description',
action='store', type="string", default=None,
help='dbkey_description')
(options, args) = parser.parse_args()
filename = args[0]
with open(filename) as fp:
params = json.load(fp)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
data_manager_dict = {}
param_dict = params['param_dict']
database_id = param_dict['database_id']
database_name = param_dict['database_name']
if param_dict['tax_cond']['tax_select'] == "ncbi":
param_dict['tax_cond']['ncbi_tax'] = args[1]
# Fetch the FASTA
REFERENCE_SOURCE_TO_DOWNLOAD[param_dict['reference_source']['reference_source_selector']](data_manager_dict, param_dict, target_directory, database_id, database_name)
# save info to json file
with open(filename, 'w') as fp:
fp.write(json.dumps(data_manager_dict, sort_keys=True))
if __name__ == "__main__":
main()
|
mvdbeek/tools-iuc
|
data_managers/data_manager_diamond_database_builder/data_manager/data_manager_diamond_database_builder.py
|
Python
|
mit
| 10,627
|
[
"BLAST"
] |
6953a3bb54abde23dee581a0317c8a56035e6cffa16ddb9d23f6dd41f6c0e341
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD style
import numpy as np
from scipy import linalg, optimize, rand
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import array2d
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
def solve_triangular(x, y, lower=True):
return linalg.solve(x, y)
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = array2d(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij.astype(np.int)
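# A minimal sketch of the expected output (illustrative, not a doctest run
# against this module):
# >>> X = np.array([[0.], [1.], [3.]])
# >>> D, ij = l1_cross_distances(X)
# >>> D.ravel() # componentwise |x_i - x_j| for all pairs i < j
# array([ 1., 3., 2.])
# >>> ij
# array([[0, 1], [0, 2], [1, 2]])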
class GaussianProcess(BaseEstimator, RegressorMixin):
"""
The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists of iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None, corr=...,
normalize=..., nugget=...,
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
**References**:
.. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J. Sondergaard.
DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
# Run input checks
self._check_params()
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_features, ) with the observations of the
scalar output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Force data to 2D numpy.array
X = array2d(np.asarray(X))
y = np.asarray(y).ravel()[:, np.newaxis]
# Check shapes of DOE & observations
n_samples_X, n_features = X.shape
n_samples_y = y.shape[0]
if n_samples_X != n_samples_y:
raise ValueError("X and y must have the same number of rows.")
else:
n_samples = n_samples_X
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple X are not allowed")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
+ "likely something is going wrong with the "
+ "regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
+ "n_samples=%d must be greater than the "
+ "regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
+ "autocorrelation parameters...")
self.theta, self.reduced_likelihood_function_value, par = \
self.arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value):
raise Exception("Bad parameter region. "
+ "Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
+ "Computing Gaussian Process model parameters...")
self.theta = self.theta0
self.reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
+ "Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like
An array with shape (n_eval, ) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) with the Mean Squared Error at x.
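Examples
--------
A hedged sketch continuing the fit() example above (hypothetical
values):
>>> x = np.atleast_2d(np.linspace(0, 10, 1000)).T
>>> y_pred, MSE = gp.predict(x, eval_MSE=True)
>>> sigma = np.sqrt(MSE)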
"""
# Check input shapes
X = array2d(X)
n_eval, n_features_X = X.shape
n_samples, n_features = self.X.shape
# Run input checks
self._check_params(n_samples)
if n_features_X != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
+ "should match the sample size used for fit() "
+ "which is %d.") % (n_features_X, n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
+ "at instanciation. Need to recompute "
+ "autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T)
else:
# Ordinary Kriging
u = np.zeros(y.shape)
MSE = self.sigma2 * (1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
# Ceiling division so the last (possibly partial) batch is not
# dropped; clip batch_to at n_eval.
for k in range(int(np.ceil(float(n_eval) / batch_size))):
batch_from = k * batch_size
batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(int(np.ceil(float(n_eval) / batch_size))):
batch_from = k * batch_size
batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie theta = self.theta).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
+ "of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
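# Minimizing sigma2.sum() * detR (the concentrated likelihood
# criterion) is equivalent to maximizing the likelihood; the negated
# product below is what arg_max_reduced_likelihood_function()
# maximizes.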
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated with the optimal theta.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print "The chosen optimizer is: " + str(self.optimizer)
if self.random_start > 1:
print str(self.random_start) + " random starts are required."
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(theta=10.
** log10t)[0]
constraints = []
for i in range(self.theta0.size):
# Bind the loop index as a default argument so each constraint
# keeps its own i (a bare closure would capture only the last i).
constraints.append(lambda log10t, i=i: \
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i: \
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ rand(self.theta0.size).reshape(self.theta0.shape) \
* np.log10(self.thetaU / self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints, iprint=0)
optimal_theta = 10. ** log10_optimal_theta
optimal_minus_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
optimal_rlf_value = - optimal_minus_rlf_value
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print "%s completed" % (5 * percent_completed)
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = array2d(self.theta0.min())
self.thetaL = array2d(self.thetaL.min())
self.thetaU = array2d(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self.arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in np.random.permutation(range(theta0.size)):
if verbose:
print "Proceeding along dimension %d..." % (i + 1)
self.theta0 = array2d(theta_iso)
self.thetaL = array2d(thetaL[0, i])
self.thetaU = array2d(thetaU[0, i])
def corr_cut(t, d):
return corr(array2d(np.hstack([
optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i + 1)::]])), d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self.arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError(("This optimizer ('%s') is not "
+ "implemented yet. Please contribute!")
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError(("regr should be one of %s or callable, "
+ "%s was given.")
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = array2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError(("corr should be one of %s or callable, "
+ "%s was given.")
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
+ "'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = array2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = array2d(self.thetaL)
self.thetaU = array2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
+ "same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
+ "thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
+ "neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
|
cdegroc/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
Python
|
bsd-3-clause
| 32,730
|
[
"Gaussian"
] |
22578a7890fd7bb719f8b3d27b4e4bb265796a3da1dacf140d33c274a5742b1f
|
# encoding: utf-8
import os
import numpy as np
import glob
import re
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
import pyroms_toolbox
import _remapping
import matplotlib.pyplot as plt
import datetime
def remapping_bound(varname, srcfile, wts_files, srcgrd, dst_grd, \
rotate_uv=False, trange=None, irange=None, jrange=None, \
dstdir='./' ,zlevel=None, dmax=0, cdepth=0, kk=0, \
uvar='u', vvar='v', rotate_part=False):
'''
A remapping function to extract boundary conditions from one ROMS grid
to another. It will optionally rotate the u and v variables, but needs
to be called separately for each u/v pair (such as u/v, uice/vice).
'''
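# A hedged usage sketch (file patterns and grid names below are
# hypothetical, not taken from an actual configuration):
#
# remapping_bound(['temp', 'salt', 'u', 'v'], 'ocean_his_*.nc',
# 'remap_weights_SRC_to_DST_*.nc', 'SRC', 'DST',
# rotate_uv=True, dstdir='./bdry/')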
# get input and output grid
if type(srcgrd).__name__ != 'ROMS_Grid':
srcgrd = pyroms.grid.get_ROMS_grid(srcgrd)
if type(dst_grd).__name__ != 'ROMS_Grid':
dst_grd = pyroms.grid.get_ROMS_grid(dst_grd)
# build intermediate z-grid
if zlevel is None:
zlevel = np.array([-7500.,-7000.,-6500.,-6000.,-5500.,-5000.,\
-4500.,-4000.,-3500.,-3000.,-2500.,-2000.,-1750.,\
-1500.,-1250.,-1000.,-900.,-800.,-700.,-600.,-500.,\
-400.,-300.,-250.,-200.,-175.,-150.,-125.,-100.,-90.,\
-80.,-70.,-60.,-50.,-45.,-40.,-35.,-30.,-25.,-20.,-17.5,\
-15.,-12.5,-10.,-7.5,-5.,-2.5,0.])
else:
zlevel = np.sort(-abs(zlevel))
nzlevel = len(zlevel)
src_zcoord = pyroms.vgrid.z_coordinate(srcgrd.vgrid.h, zlevel, nzlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
srcgrdz = pyroms.grid.ROMS_Grid(srcgrd.name+'_Z', srcgrd.hgrid, src_zcoord)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# varname argument
if type(varname).__name__ == 'list':
nvar = len(varname)
elif type(varname).__name__ == 'str':
varname = [varname]
nvar = len(varname)
else:
raise ValueError, 'varname must be a str or a list of str'
# if we're working on u and v, we'll compute ubar,vbar afterwards
compute_ubar = False
if ('u' in varname and 'v' in varname) or \
('u_eastward' in varname and 'v_northward' in varname):
compute_ubar = True
print 'ubar/vbar to be computed from u/v'
if 'ubar' in varname:
varname.remove('ubar')
nvar = nvar-1
if 'vbar' in varname:
varname.remove('vbar')
nvar = nvar-1
# if rotate_uv=True, check that u and v are in varname
if rotate_uv is True:
if uvar not in varname or vvar not in varname:
raise Warning, 'varname must include uvar and vvar in order to' \
+ ' rotate the velocity field'
else:
varname.remove(uvar)
varname.remove(vvar)
nvar = nvar-2
# srcfile argument
if type(srcfile).__name__ == 'list':
nfile = len(srcfile)
elif type(srcfile).__name__ == 'str':
srcfile = sorted(glob.glob(srcfile))
nfile = len(srcfile)
else:
raise ValueError, 'srcfile must be a str or a list of str'
# get wts_file
if type(wts_files).__name__ == 'str':
wts_files = sorted(glob.glob(wts_files))
sides = ['_west','_east','_north','_south']
long = {'_west':'Western', '_east':'Eastern', \
'_north':'Northern', '_south':'Southern'}
dimexcl = {'_west':'xi', '_east':'xi', \
'_north':'eta', '_south':'eta'}
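# dimexcl records the dimension that collapses on each boundary:
# xi for the west/east walls, eta for the north/south walls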
nctidx = 0
# loop over the srcfile
for nf in range(nfile):
print 'Working with file', srcfile[nf], '...'
# get time
ocean_time = pyroms.utility.get_nc_var('ocean_time', srcfile[nf])
ntime = len(ocean_time[:])
# trange argument
if trange is None:
trange = range(ntime)
# create destination file
if nctidx == 0:
dstfile = dstdir + os.path.basename(srcfile[nf])[:-3] + '_' \
+ dst_grd.name + '_bdry.nc'
if not os.path.exists(dstfile):
print 'Creating destination file', dstfile
pyroms_toolbox.nc_create_roms_file(dstfile, dst_grd, \
ocean_time, lgrid=False)
# open destination file
nc = netCDF.Dataset(dstfile, 'a', format='NETCDF3_64BIT')
# loop over time
for nt in trange:
nc.variables['ocean_time'][nctidx] = ocean_time[nt]
# loop over variable
for nv in range(nvar):
print ' '
print 'remapping', varname[nv], 'from', srcgrd.name, \
'to', dst_grd.name
print 'time =', ocean_time[nt]
Mp, Lp = dst_grd.hgrid.mask_rho.shape
# get source data
src_var = pyroms.utility.get_nc_var(varname[nv], srcfile[nf])
# determine variable dimension
ndim = len(src_var.dimensions)-1
# get spval
try:
spval = src_var._FillValue
except:
raise Warning, 'Did not find a _FillValue attribute.'
# irange
if irange is None:
iirange = (0,src_var.shape[-1])
else:
iirange = irange
# jrange
if jrange is None:
jjrange = (0,src_var.shape[-2])
else:
jjrange = jrange
# determine where on the C-grid this variable lies
if src_var.dimensions[2].find('_rho') != -1:
Cpos='rho'
if src_var.dimensions[2].find('_u') != -1:
Cpos='u'
Lp = Lp-1
if src_var.dimensions[2].find('_v') != -1:
Cpos='v'
Mp = Mp-1
if src_var.dimensions[1].find('_w') != -1:
Cpos='w'
print 'Arakawa C-grid position is', Cpos
# create variable in destination file
if nctidx == 0:
for sid in sides:
varn = varname[nv]+str(sid)
dimens = [i for i in src_var.dimensions]
# iterate over a copy: removing items from the list being
# iterated would skip elements
for dim in list(dimens):
if re.match(dimexcl[sid],dim):
dimens.remove(dim)
print 'Creating variable', varn, dimens
nc.createVariable(varn, 'f8', dimens, \
fill_value=spval)
nc.variables[varn].long_name = varname[nv] + \
' ' + long[sid] + ' boundary condition'
try:
nc.variables[varn].units = src_var.units
except:
print varn+' has no units'
nc.variables[varn].time = src_var.time
nc.variables[varn].coordinates = \
' '.join(dimens[::-1])
nc.variables[varn].field = src_var.field
# get the right remap weights file
for s in range(len(wts_files)):
if Cpos+'_to_'+Cpos+'.nc' in wts_files[s]:
wts_file = wts_files[s]
break
else:
if s == len(wts_files) - 1:
raise ValueError, 'Did not find the appropriate remap weights file'
if ndim == 3:
# vertical interpolation from sigma to standard z level
print 'vertical interpolation from sigma to standard z level'
src_varz = pyroms.remapping.roms2z( \
src_var[nt,:,jjrange[0]:jjrange[1],iirange[0]:iirange[1]], \
srcgrd, srcgrdz, Cpos=Cpos, spval=spval, \
irange=iirange, jrange=jjrange)
# flood the grid
print 'flood the grid'
src_varz = pyroms.remapping.flood(src_varz, srcgrdz, Cpos=Cpos, \
irange=iirange, jrange=jjrange, spval=spval, \
dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_varz = src_var[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
print datetime.datetime.now()
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_varz = pyroms.remapping.remap(src_varz, wts_file, \
spval=spval)
if ndim == 3:
dst_var_north = pyroms.remapping.z2roms(dst_varz[:, \
Mp-1:Mp,0:Lp], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(0,Lp), \
jrange=(Mp-1,Mp))
dst_var_south = pyroms.remapping.z2roms(dst_varz[:, \
0:1, :], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(0,Lp), \
jrange=(0,1))
dst_var_east = pyroms.remapping.z2roms(dst_varz[:, \
:, Lp-1:Lp], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(Lp-1,Lp), \
jrange=(0,Mp))
dst_var_west = pyroms.remapping.z2roms(dst_varz[:, \
:, 0:1], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(0,1), \
jrange=(0,Mp))
if varname[nv] == 'u':
dst_u_west = dst_var_west
dst_u_east = dst_var_east
dst_u_north = dst_var_north
dst_u_south = dst_var_south
if varname[nv] == 'v':
dst_v_west = dst_var_west
dst_v_east = dst_var_east
dst_v_north = dst_var_north
dst_v_south = dst_var_south
else:
dst_var_north = dst_varz[-1, :]
dst_var_south = dst_varz[0, :]
dst_var_east = dst_varz[:, -1]
dst_var_west = dst_varz[:, 0]
# print datetime.datetime.now()
# write data in destination file
print 'write data in destination file'
sid = '_west'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_west)
sid = '_east'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_east)
sid = '_north'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_north)
sid = '_south'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_south)
# rotate the velocity field if requested
if rotate_uv is True:
print ' '
print 'remapping and rotating u and v from', srcgrd.name, \
'to', dst_grd.name
# get source data
src_u = pyroms.utility.get_nc_var(uvar, srcfile[nf])
src_v = pyroms.utility.get_nc_var(vvar, srcfile[nf])
# get spval
try:
spval = src_v._FillValue
except:
raise Warning, 'Did not find a _FillValue attribute.'
if rotate_part:
ndim = len(src_u.dimensions)-1
ind = uvar.find('_eastward')
uvar_out = uvar[0:ind]
print "Warning: renaming uvar to", uvar_out
ind = vvar.find('_northward')
vvar_out = vvar[0:ind]
print "Warning: renaming vvar to", vvar_out
if ndim == 3:
dimens_u = ['ocean_time', 's_rho', 'eta_u', 'xi_u']
dimens_v = ['ocean_time', 's_rho', 'eta_v', 'xi_v']
else:
dimens_u = ['ocean_time', 'eta_u', 'xi_u']
dimens_v = ['ocean_time', 'eta_v', 'xi_v']
else:
dimens_u = [i for i in src_u.dimensions]
dimens_v = [i for i in src_v.dimensions]
uvar_out = uvar
vvar_out = vvar
# create variable in destination file
if nctidx == 0:
print 'Creating boundary variables for '+uvar
for sid in sides:
varn = uvar_out+str(sid)
print 'Creating variable', varn
dimens = list(dimens_u)
for dim in list(dimens):
if re.match(dimexcl[sid],dim):
dimens.remove(dim)
nc.createVariable(varn, 'f8', dimens, \
fill_value=spval)
nc.variables[varn].long_name = uvar_out + \
' ' + long[sid] + ' boundary condition'
try:
nc.variables[varn].units = src_u.units
except:
print varn+' has no units'
nc.variables[varn].time = src_u.time
nc.variables[varn].coordinates = \
' '.join(dimens[::-1])
nc.variables[varn].field = src_u.field
print 'Creating boundary variables for '+vvar
for sid in sides:
varn = vvar_out+str(sid)
print 'Creating variable', varn
dimens = list(dimens_v)
for dim in list(dimens):
if re.match(dimexcl[sid],dim):
dimens.remove(dim)
nc.createVariable(varn, 'f8', dimens, \
fill_value=spval)
nc.variables[varn].long_name = vvar_out + \
' ' + long[sid] + ' boundary condition'
try:
nc.variables[varn].units = src_v.units
except:
print varn+' has no units'
nc.variables[varn].time = src_v.time
nc.variables[varn].coordinates = \
' '.join(dimens[::-1])
nc.variables[varn].field = src_v.field
# get the right remap weights file
if rotate_part:
for s in range(len(wts_files)):
if 'rho_to_rho.nc' in wts_files[s]:
wts_file_u = wts_files[s]
wts_file_v = wts_files[s]
Cpos_u = 'rho'
Cpos_v = 'rho'
else:
for s in range(len(wts_files)):
if 'u_to_rho.nc' in wts_files[s]:
wts_file_u = wts_files[s]
if 'v_to_rho.nc' in wts_files[s]:
wts_file_v = wts_files[s]
Cpos_u = 'u'
Cpos_v = 'v'
# vertical interpolation from sigma to standard z level
# irange
if irange is None:
iirange = (0,src_u.shape[-1])
else:
iirange = irange
# jrange
if jrange is None:
jjrange = (0,src_u.shape[-2])
else:
jjrange = jrange
ndim = len(src_v.dimensions)-1
if ndim == 3:
print 'vertical interpolation from sigma to standard z level'
src_uz = pyroms.remapping.roms2z( \
src_u[nt,:,jjrange[0]:jjrange[1],iirange[0]:iirange[1]], \
srcgrd, srcgrdz, Cpos=Cpos_u, irange=iirange, jrange=jjrange)
# flood the grid
print 'flood the u grid'
src_uz = pyroms.remapping.flood(src_uz, srcgrdz, Cpos=Cpos_u, \
irange=iirange, jrange=jjrange, \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_uz = src_u[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
src_uz = pyroms.remapping.flood2d(src_uz, srcgrdz, Cpos=Cpos_u, \
irange=iirange, jrange=jjrange, spval=spval, \
dmax=dmax)
# irange
if irange is None:
iirange = (0,src_v.shape[-1])
else:
iirange = irange
# jrange
if jrange is None:
jjrange = (0,src_v.shape[-2])
else:
jjrange = jrange
if ndim == 3:
src_vz = pyroms.remapping.roms2z( \
src_v[nt,:,jjrange[0]:jjrange[1],iirange[0]:iirange[1]], \
srcgrd, srcgrdz, Cpos=Cpos_v, irange=iirange, jrange=jjrange)
# flood the grid
print 'flood the v grid'
src_vz = pyroms.remapping.flood(src_vz, srcgrdz, Cpos=Cpos_v, \
irange=iirange, jrange=jjrange, \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_vz = src_v[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
src_vz = pyroms.remapping.flood2d(src_vz, srcgrdz, Cpos=Cpos_v, \
irange=iirange, jrange=jjrange, spval=spval, \
dmax=dmax)
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_uz = pyroms.remapping.remap(src_uz, wts_file_u, \
spval=spval)
dst_vz = pyroms.remapping.remap(src_vz, wts_file_v, \
spval=spval)
Mp, Lp = dst_grd.hgrid.mask_rho.shape
if ndim == 3:
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_u_north = pyroms.remapping.z2roms(dst_uz[:, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_u_south = pyroms.remapping.z2roms(dst_uz[:, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_u_east = pyroms.remapping.z2roms(dst_uz[:, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_u_west = pyroms.remapping.z2roms(dst_uz[:, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
dst_v_north = pyroms.remapping.z2roms(dst_vz[:, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_v_south = pyroms.remapping.z2roms(dst_vz[:, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_v_east = pyroms.remapping.z2roms(dst_vz[:, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_v_west = pyroms.remapping.z2roms(dst_vz[:, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
else:
dst_u_north = dst_uz[Mp-2:Mp, 0:Lp]
dst_u_south = dst_uz[0:2, 0:Lp]
dst_u_east = dst_uz[0:Mp, Lp-2:Lp]
dst_u_west = dst_uz[0:Mp, 0:2]
dst_v_north = dst_vz[Mp-2:Mp, 0:Lp]
dst_v_south = dst_vz[0:2, 0:Lp]
dst_v_east = dst_vz[0:Mp, Lp-2:Lp]
dst_v_west = dst_vz[0:Mp, 0:2]
# rotate u,v fields
if rotate_part:
src_angle = np.zeros(dst_grd.hgrid.angle_rho.shape)
else:
for s in range(len(wts_files)):
if 'rho_to_rho.nc' in wts_files[s]:
wts_file = wts_files[s]
src_ang = srcgrd.hgrid.angle_rho[jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
src_angle = pyroms.remapping.remap(src_ang, wts_file)
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
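# Rotate velocities through the angle difference: treat (u, v) as
# the complex number u + v*1j and multiply by exp(-1j*angle), which
# maps the source-grid orientation onto the destination grid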
if ndim == 3:
angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp])
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp])
U_east = dst_u_east + dst_v_east*1j
eitheta_east = np.exp(-1j*angle[:,0:Mp, Lp-2:Lp])
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2])
else:
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[Mp-2:Mp, 0:Lp])
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[0:2, 0:Lp])
U_east = dst_u_east + dst_v_east*1j
eitheta_east = np.exp(-1j*angle[0:Mp, Lp-2:Lp])
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[0:Mp, 0:2])
U_north = U_north * eitheta_north
dst_u_north = np.real(U_north)
dst_v_north = np.imag(U_north)
U_south = U_south * eitheta_south
dst_u_south = np.real(U_south)
dst_v_south = np.imag(U_south)
U_east = U_east * eitheta_east
dst_u_east = np.real(U_east)
dst_v_east = np.imag(U_east)
U_west = U_west * eitheta_west
dst_u_west = np.real(U_west)
dst_v_west = np.imag(U_west)
# move back to u,v points
if ndim == 3:
dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + \
dst_u_north[:,-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + \
dst_v_north[:,1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + \
dst_u_south[:,0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + \
dst_v_south[:,1:,:])
dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:,:-1] + \
dst_u_east[:,:,1:])
dst_v_east = 0.5 * np.squeeze(dst_v_east[:,:-1,-1] + \
dst_v_east[:,1:,-1])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + \
dst_u_west[:,:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + \
dst_v_west[:,1:,0])
else:
dst_u_north = 0.5 * np.squeeze(dst_u_north[-1,:-1] + \
dst_u_north[-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:-1,:] + \
dst_v_north[1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[0,:-1] + \
dst_u_south[0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:-1,:] + \
dst_v_south[1:,:])
dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:-1] + \
dst_u_east[:,1:])
dst_v_east = 0.5 * np.squeeze(dst_v_east[:-1,-1] + \
dst_v_east[1:,-1])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:-1] + \
dst_u_west[:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:-1,0] + \
dst_v_west[1:,0])
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
if ndim == 3:
for n in range(dst_grd.vgrid.N):
dst_u_north[n, idxu_north[0]] = spval
dst_v_north[n, idxv_north[0]] = spval
dst_u_south[n, idxu_south[0]] = spval
dst_v_south[n, idxv_south[0]] = spval
dst_u_east[n, idxu_east[0]] = spval
dst_v_east[n, idxv_east[0]] = spval
dst_u_west[n, idxu_west[0]] = spval
dst_v_west[n, idxv_west[0]] = spval
else:
dst_u_north[idxu_north[0]] = spval
dst_v_north[idxv_north[0]] = spval
dst_u_south[idxu_south[0]] = spval
dst_v_south[idxv_south[0]] = spval
dst_u_east[idxu_east[0]] = spval
dst_v_east[idxv_east[0]] = spval
dst_u_west[idxu_west[0]] = spval
dst_v_west[idxv_west[0]] = spval
# write data in destination file
print 'write data in destination file'
sid = '_west'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_west
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_west
sid = '_north'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_north
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_north
sid = '_east'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_east
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_east
sid = '_south'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_south
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_south
if compute_ubar:
if nctidx == 0:
print 'Creating variable ubar_north'
nc.createVariable('ubar_north', 'f8', \
('ocean_time', 'xi_u'), fill_value=spval)
nc.variables['ubar_north'].long_name = \
'2D u-momentum north boundary condition'
nc.variables['ubar_north'].units = 'meter second-1'
nc.variables['ubar_north'].time = 'ocean_time'
nc.variables['ubar_north'].coordinates = 'xi_u ocean_time'
nc.variables['ubar_north'].field = 'ubar_north, scalar, series'
print 'Creating variable vbar_north'
nc.createVariable('vbar_north', 'f8', \
('ocean_time', 'xi_v'), fill_value=spval)
nc.variables['vbar_north'].long_name = \
'2D v-momentum north boundary condition'
nc.variables['vbar_north'].units = 'meter second-1'
nc.variables['vbar_north'].time = 'ocean_time'
nc.variables['vbar_north'].coordinates = 'xi_v ocean_time'
nc.variables['vbar_north'].field = 'vbar_north, scalar, series'
print 'Creating variable ubar_south'
nc.createVariable('ubar_south', 'f8', \
('ocean_time', 'xi_u'), fill_value=spval)
nc.variables['ubar_south'].long_name = \
'2D u-momentum south boundary condition'
nc.variables['ubar_south'].units = 'meter second-1'
nc.variables['ubar_south'].time = 'ocean_time'
nc.variables['ubar_south'].coordinates = 'xi_u ocean_time'
nc.variables['ubar_south'].field = 'ubar_south, scalar, series'
print 'Creating variable vbar_south'
nc.createVariable('vbar_south', 'f8', \
('ocean_time', 'xi_v'), fill_value=spval)
nc.variables['vbar_south'].long_name = \
'2D v-momentum south boundary condition'
nc.variables['vbar_south'].units = 'meter second-1'
nc.variables['vbar_south'].time = 'ocean_time'
nc.variables['vbar_south'].coordinates = 'xi_v ocean_time'
nc.variables['vbar_south'].field = 'vbar_south, scalar, series'
print 'Creating variable ubar_west'
nc.createVariable('ubar_west', 'f8', \
('ocean_time', 'eta_u'), fill_value=spval)
nc.variables['ubar_west'].long_name = \
'2D u-momentum west boundary condition'
nc.variables['ubar_west'].units = 'meter second-1'
nc.variables['ubar_west'].time = 'ocean_time'
nc.variables['ubar_west'].coordinates = 'eta_u ocean_time'
nc.variables['ubar_west'].field = 'ubar_west, scalar, series'
print 'Creating variable vbar_west'
nc.createVariable('vbar_west', 'f8', \
('ocean_time', 'eta_v'), fill_value=spval)
nc.variables['vbar_west'].long_name = \
'2D v-momentum west boundary condition'
nc.variables['vbar_west'].units = 'meter second-1'
nc.variables['vbar_west'].time = 'ocean_time'
nc.variables['vbar_west'].coordinates = 'eta_v ocean_time'
nc.variables['vbar_west'].field = 'vbar_west, scalar, series'
print 'Creating variable ubar_east'
nc.createVariable('ubar_east', 'f8', \
('ocean_time', 'eta_u'), fill_value=spval)
nc.variables['ubar_east'].long_name = \
'2D u-momentum east boundary condition'
nc.variables['ubar_east'].units = 'meter second-1'
nc.variables['ubar_east'].time = 'ocean_time'
nc.variables['ubar_east'].coordinates = 'eta_u ocean_time'
nc.variables['ubar_east'].field = 'ubar_east, scalar, series'
print 'Creating variable vbar_east'
nc.createVariable('vbar_east', 'f8', \
('ocean_time', 'eta_v'), fill_value=spval)
nc.variables['vbar_east'].long_name = \
'2D v-momentum east boundary condition'
nc.variables['vbar_east'].units = 'meter second-1'
nc.variables['vbar_east'].time = 'ocean_time'
nc.variables['vbar_east'].coordinates = 'eta_v ocean_time'
nc.variables['vbar_east'].field = 'vbar_east, scalar, series'
# compute depth average velocity ubar and vbar
# get z at the right position
print 'Computing ubar/vbar from u/v'
z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] +
dst_grd.vgrid.z_w[0,:,-1, 1:])
z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] +
dst_grd.vgrid.z_w[0,:,-2,:])
z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] +
dst_grd.vgrid.z_w[0,:,0,1:])
z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] +
dst_grd.vgrid.z_w[0,:,1,:])
z_u_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:,-1] +
dst_grd.vgrid.z_w[0,:,:,-2])
z_v_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,-1] +
dst_grd.vgrid.z_w[0,:,1:,-1])
z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] +
dst_grd.vgrid.z_w[0,:,:,1])
z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] +
dst_grd.vgrid.z_w[0,:,1:,0])
if not rotate_uv:
dst_u_north = np.squeeze(dst_u_north)
dst_v_north = np.squeeze(dst_v_north)
dst_u_south = np.squeeze(dst_u_south)
dst_v_south = np.squeeze(dst_v_south)
dst_u_east = np.squeeze(dst_u_east)
dst_v_east = np.squeeze(dst_v_east)
dst_u_west = np.squeeze(dst_u_west)
dst_v_west = np.squeeze(dst_v_west)
dst_ubar_north = np.zeros(dst_u_north.shape[1])
dst_ubar_south = np.zeros(dst_u_south.shape[1])
dst_ubar_east = np.zeros(dst_u_east.shape[1])
dst_ubar_west = np.zeros(dst_u_west.shape[1])
dst_vbar_north = np.zeros(dst_v_north.shape[1])
dst_vbar_south = np.zeros(dst_v_south.shape[1])
dst_vbar_east = np.zeros(dst_v_east.shape[1])
dst_vbar_west = np.zeros(dst_v_west.shape[1])
# print 'Shapes 3', dst_u_north.shape, dst_ubar_north.shape, z_u_north.shape, np.diff(z_u_north[:,1]).shape
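# Depth-average each boundary profile: ubar = sum_k(u_k * dz_k) / H,
# with layer thicknesses dz_k = diff(z_w) and total depth H = -z_w[0]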
for i in range(dst_u_north.shape[1]):
dst_ubar_north[i] = (dst_u_north[:,i] * \
np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i]
dst_ubar_south[i] = (dst_u_south[:,i] * \
np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i]
for i in range(dst_v_north.shape[1]):
dst_vbar_north[i] = (dst_v_north[:,i] * \
np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i]
dst_vbar_south[i] = (dst_v_south[:,i] * \
np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i]
for j in range(dst_u_east.shape[1]):
dst_ubar_east[j] = (dst_u_east[:,j] * \
np.diff(z_u_east[:,j])).sum() / -z_u_east[0,j]
dst_ubar_west[j] = (dst_u_west[:,j] * \
np.diff(z_u_west[:,j])).sum() / -z_u_west[0,j]
for j in range(dst_v_east.shape[1]):
dst_vbar_east[j] = (dst_v_east[:,j] * \
np.diff(z_v_east[:,j])).sum() / -z_v_east[0,j]
dst_vbar_west[j] = (dst_v_west[:,j] * \
np.diff(z_v_west[:,j])).sum() / -z_v_west[0,j]
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
dst_ubar_north[idxu_north[0]] = spval
dst_vbar_north[idxv_north[0]] = spval
dst_ubar_south[idxu_south[0]] = spval
dst_vbar_south[idxv_south[0]] = spval
dst_ubar_east[idxu_east[0]] = spval
dst_vbar_east[idxv_east[0]] = spval
dst_ubar_west[idxu_west[0]] = spval
dst_vbar_west[idxv_west[0]] = spval
nc.variables['ubar_north'][nctidx] = dst_ubar_north
nc.variables['ubar_south'][nctidx] = dst_ubar_south
nc.variables['ubar_east'][nctidx] = dst_ubar_east
nc.variables['ubar_west'][nctidx] = dst_ubar_west
nc.variables['vbar_north'][nctidx] = dst_vbar_north
nc.variables['vbar_south'][nctidx] = dst_vbar_south
nc.variables['vbar_east'][nctidx] = dst_vbar_east
nc.variables['vbar_west'][nctidx] = dst_vbar_west
nctidx = nctidx + 1
# close files here? how?
# close destination file
nc.close()
return
|
dcherian/pyroms
|
pyroms_toolbox/pyroms_toolbox/remapping_bound.py
|
Python
|
bsd-3-clause
| 38,551
|
[
"NetCDF"
] |
f6e1c2b730271bff23481f08e5f999893694942b92a39aa3342843f0c9829029
|
import logging
import numpy as np
from collections import OrderedDict
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.nnet.conv import conv2d, ConvOp
from theano.sandbox.cuda.blas import GpuCorrMM
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from blocks.bricks.cost import SquaredError
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.graph import add_annotation, Annotation
from blocks.roles import add_role, PARAMETER, WEIGHT, BIAS
from utils import shared_param, AttributeDict
from nn import maxpool_2d, global_meanpool_2d, BNPARAM
logger = logging.getLogger('main.model')
floatX = theano.config.floatX
theano.sandbox.cuda.use('gpu1')
class LadderAE():
def __init__(self, p):
self.p = p
self.init_weights_transpose = False
self.default_lr = p.lr
self.shareds = OrderedDict()
self.rstream = RandomStreams(seed=p.seed)
self.rng = np.random.RandomState(seed=p.seed)
n_layers = len(p.encoder_layers)
assert n_layers > 1, "Need to define encoder layers"
assert n_layers == len(p.denoising_cost_x), (
"Number of denoising costs does not match with %d layers: %s" %
(n_layers, str(p.denoising_cost_x)))
def one_to_all(x):
""" (5.,) -> 5 -> (5., 5., 5.)
('relu',) -> 'relu' -> ('relu', 'relu', 'relu')
"""
if type(x) is tuple and len(x) == 1:
x = x[0]
if type(x) is float:
x = (np.float32(x),) * n_layers
if type(x) is str:
x = (x,) * n_layers
return x
p.decoder_spec = one_to_all(p.decoder_spec)
p.f_local_noise_std = one_to_all(p.f_local_noise_std)
acts = one_to_all(p.get('act', 'relu'))
assert n_layers == len(p.decoder_spec), "f and g need to match"
assert (n_layers == len(acts)), (
"Not enough activations given. Requires %d. Got: %s" %
(n_layers, str(acts)))
acts = acts[:-1] + ('softmax',)
def parse_layer(spec):
""" 'fc:5' -> ('fc', 5)
'5' -> ('fc', 5)
5 -> ('fc', 5)
'convv:3:2:2' -> ('convv', [3,2,2])
"""
if type(spec) is not str:
return "fc", spec
spec = spec.split(':')
l_type = spec.pop(0) if len(spec) >= 2 else "fc"
spec = map(int, spec)
spec = spec[0] if len(spec) == 1 else spec
return l_type, spec
enc = map(parse_layer, p.encoder_layers)
self.layers = list(enumerate(zip(enc, p.decoder_spec, acts)))
def weight(self, init, name, cast_float32=True, for_conv=False):
weight = self.shared(init, name, cast_float32, role=WEIGHT)
if for_conv:
return weight.dimshuffle('x', 0, 'x', 'x')
return weight
def bias(self, init, name, cast_float32=True, for_conv=False):
b = self.shared(init, name, cast_float32, role=BIAS)
if for_conv:
return b.dimshuffle('x', 0, 'x', 'x')
return b
def shared(self, init, name, cast_float32=True, role=PARAMETER, **kwargs):
p = self.shareds.get(name)
if p is None:
p = shared_param(init, name, cast_float32, role, **kwargs)
self.shareds[name] = p
return p
def counter(self):
name = 'counter'
p = self.shareds.get(name)
update = []
if p is None:
p_max_val = np.float32(10)
p = self.shared(np.float32(1), name, role=BNPARAM)
p_max = self.shared(p_max_val, name + '_max', role=BNPARAM)
update = [(p, T.clip(p + np.float32(1), np.float32(0), p_max)),
(p_max, p_max_val)]
return (p, update)
def noise_like(self, x):
noise = self.rstream.normal(size=x.shape, avg=0.0, std=1.0)
return T.cast(noise, dtype=floatX)
def rand_init(self, in_dim, out_dim):
""" Random initialization for fully connected layers """
W = self.rng.randn(in_dim, out_dim) / np.sqrt(in_dim)
return W
def rand_init_conv(self, dim):
""" Random initialization for convolution filters """
fan_in = np.prod(dtype=floatX, a=dim[1:])
bound = np.sqrt(3. / max(1.0, (fan_in)))
W = np.asarray(
self.rng.uniform(low=-bound, high=bound, size=dim), dtype=floatX)
return W
def new_activation_dict(self):
return AttributeDict({'z': {}, 'h': {}, 's': {}, 'm': {}})
def annotate_update(self, update, tag_to):
a = Annotation()
for (var, up) in update:
a.updates[var] = up
add_annotation(tag_to, a)
def apply(self, input_labeled, target_labeled, input_unlabeled):
self.layer_counter = 0
input_dim = self.p.encoder_layers[0]
# Store the dimension tuples in the same order as layers.
layers = self.layers
self.layer_dims = {0: input_dim}
self.lr = self.shared(self.default_lr, 'learning_rate', role=None)
self.costs = costs = AttributeDict()
self.costs.denois = AttributeDict()
self.act = AttributeDict()
self.error = AttributeDict()
top = len(layers) - 1
N = input_labeled.shape[0]
self.join = lambda l, u: T.concatenate([l, u], axis=0)
self.labeled = lambda x: x[:N] if x is not None else x
self.unlabeled = lambda x: x[N:] if x is not None else x
self.split_lu = lambda x: (self.labeled(x), self.unlabeled(x))
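# Labeled and unlabeled examples travel through the encoder as one
# concatenated batch: the first N rows are labeled, the rest are
# unlabeled, and split_lu recovers the two halves where needed.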
input_concat = self.join(input_labeled, input_unlabeled)
def encoder(input_, path_name, input_noise_std=0, noise_std=[]):
h = input_
logger.info(' 0: noise %g' % input_noise_std)
if input_noise_std > 0.:
h = h + self.noise_like(h) * input_noise_std
d = AttributeDict()
d.unlabeled = self.new_activation_dict()
d.labeled = self.new_activation_dict()
d.labeled.z[0] = self.labeled(h)
d.unlabeled.z[0] = self.unlabeled(h)
prev_dim = input_dim
for i, (spec, _, act_f) in layers[1:]:
d.labeled.h[i - 1], d.unlabeled.h[i - 1] = self.split_lu(h)
noise = noise_std[i] if i < len(noise_std) else 0.
curr_dim, z, m, s, h = self.f(h, prev_dim, spec, i, act_f,
path_name=path_name,
noise_std=noise)
assert self.layer_dims.get(i) in (None, curr_dim)
self.layer_dims[i] = curr_dim
d.labeled.z[i], d.unlabeled.z[i] = self.split_lu(z)
d.unlabeled.s[i] = s
d.unlabeled.m[i] = m
prev_dim = curr_dim
d.labeled.h[i], d.unlabeled.h[i] = self.split_lu(h)
return d
# Clean, supervised
logger.info('Encoder: clean, labeled')
clean = self.act.clean = encoder(input_concat, 'clean')
# Corrupted, supervised
logger.info('Encoder: corr, labeled')
corr = self.act.corr = encoder(input_concat, 'corr',
input_noise_std=self.p.super_noise_std,
noise_std=self.p.f_local_noise_std)
est = self.act.est = self.new_activation_dict()
# Decoder path in opposite order
logger.info('Decoder: z_corr -> z_est')
for i, ((_, spec), l_type, act_f) in layers[::-1]:
z_corr = corr.unlabeled.z[i]
z_clean = clean.unlabeled.z[i]
z_clean_s = clean.unlabeled.s.get(i)
z_clean_m = clean.unlabeled.m.get(i)
fspec = layers[i+1][1][0] if len(layers) > i+1 else (None, None)
if i == top:
ver = corr.unlabeled.h[i]
ver_dim = self.layer_dims[i]
top_g = True
else:
ver = est.z.get(i + 1)
ver_dim = self.layer_dims.get(i + 1)
top_g = False
z_est = self.g(z_lat=z_corr,
z_ver=ver,
in_dims=ver_dim,
out_dims=self.layer_dims[i],
l_type=l_type,
num=i,
fspec=fspec,
top_g=top_g)
if z_est is not None:
# Denoising cost
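# (squared error between the batch-normalized decoder estimate and
# the clean encoder activation, scaled by 1/layer dimensionality)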
if z_clean_s and self.p.zestbn == 'bugfix':
z_est_norm = (z_est - z_clean_m) / T.sqrt(z_clean_s + np.float32(1e-10))
elif z_clean_s is None or self.p.zestbn == 'no':
z_est_norm = z_est
else:
assert False, 'Not supported path'
se = SquaredError('denois' + str(i))
costs.denois[i] = se.apply(z_est_norm.flatten(2),
z_clean.flatten(2)) \
/ np.prod(self.layer_dims[i], dtype=floatX)
costs.denois[i].name = 'denois' + str(i)
denois_print = 'denois %.2f' % self.p.denoising_cost_x[i]
else:
denois_print = ''
# Store references for later use
est.h[i] = self.apply_act(z_est, act_f)
est.z[i] = z_est
est.s[i] = None
est.m[i] = None
logger.info(' g%d: %10s, %s, dim %s -> %s' % (
i, l_type,
denois_print,
self.layer_dims.get(i+1),
self.layer_dims.get(i)
))
# Costs
y = target_labeled.flatten()
costs.class_clean = CategoricalCrossEntropy().apply(y, clean.labeled.h[top])
costs.class_clean.name = 'cost_class_clean'
costs.class_corr = CategoricalCrossEntropy().apply(y, corr.labeled.h[top])
costs.class_corr.name = 'cost_class_corr'
# This will be used for training
costs.total = costs.class_corr * 1.0
for i in range(top + 1):
if costs.denois.get(i) and self.p.denoising_cost_x[i] > 0:
costs.total += costs.denois[i] * self.p.denoising_cost_x[i]
costs.total.name = 'cost_total'
# Classification error
mr = MisclassificationRate()
self.error.clean = mr.apply(y, clean.labeled.h[top]) * np.float32(100.)
self.error.clean.name = 'error_rate_clean'
def apply_act(self, input, act_name):
if input is None:
return input
act = {
'relu': lambda x: T.maximum(0, x),
'leakyrelu': lambda x: T.switch(x > 0., x, 0.1 * x),
'linear': lambda x: x,
'softplus': lambda x: T.log(1. + T.exp(x)),
'sigmoid': lambda x: T.nnet.sigmoid(x),
'softmax': lambda x: T.nnet.softmax(x),
}.get(act_name)
assert act, 'unknown act %s' % act_name
if act_name == 'softmax':
input = input.flatten(2)
return act(input)
def annotate_bn(self, var, id, var_type, mb_size, size, norm_ax):
var_shape = np.array((1,) + size)
out_dim = np.prod(var_shape) / np.prod(var_shape[list(norm_ax)])
# Flatten the var - shared variable updating is not trivial otherwise,
# as theano seems to believe a row vector is a matrix and will complain
# about the updates
orig_shape = var.shape
var = var.flatten()
# Here we add the name and role, the variables will later be identified
# by these values
var.name = id + '_%s_clean' % var_type
add_role(var, BNPARAM)
shared_var = self.shared(np.zeros(out_dim),
name='shared_%s' % var.name, role=None)
# Update running average estimates. When the counter is reset to 1, it
# will clear its memory
cntr, c_up = self.counter()
one = np.float32(1)
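# run_avg weights the newest value by 1/counter: while the counter
# grows this is a cumulative mean; once clipped at p_max it becomes
# an exponential moving average with decay 1 - 1/p_max.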
run_avg = lambda new, old: one / cntr * new + (one - one / cntr) * old
if var_type == 'mean':
new_value = run_avg(var, shared_var)
elif var_type == 'var':
mb_size = T.cast(mb_size, 'float32')
new_value = run_avg(mb_size / (mb_size - one) * var, shared_var)
else:
raise NotImplementedError('Unknown batch norm var %s' % var_type)
# Add the counter update to the annotated update if it is the first
# instance of a counter
self.annotate_update([(shared_var, new_value)] + c_up, var)
return var.reshape(orig_shape)
def f(self, h, in_dim, spec, num, act_f, path_name, noise_std=0):
assert path_name in ['clean', 'corr']
# Generates identifiers used for referencing shared variables.
# E.g. clean and corrupted encoders will end up using the same
# variable name and hence sharing parameters
gen_id = lambda s: '_'.join(['f', str(num), s])
layer_type, _ = spec
# Pooling
if layer_type in ['maxpool', 'globalmeanpool']:
z, output_size = self.f_pool(h, spec, in_dim)
norm_ax = (0, -2, -1)
# after pooling, no activation func for now unless it's softmax
act_f = "linear" if act_f != "softmax" else act_f
# Convolution
elif layer_type in ['convv', 'convf']:
z, output_size = self.f_conv(h, spec, in_dim, gen_id('W'))
norm_ax = (0, -2, -1)
# Fully connected
elif layer_type == "fc":
h = h.flatten(2) if h.ndim > 2 else h
_, dim = spec
W = self.weight(self.rand_init(np.prod(in_dim), dim), gen_id('W'))
z, output_size = T.dot(h, W), (dim,)
norm_ax = (0,)
else:
raise ValueError("Unknown layer spec: %s" % layer_type)
m = s = None
is_normalizing = True
if is_normalizing:
keep_dims = True
z_l = self.labeled(z)
z_u = self.unlabeled(z)
m = z_u.mean(norm_ax, keepdims=keep_dims)
s = z_u.var(norm_ax, keepdims=keep_dims)
m_l = z_l.mean(norm_ax, keepdims=keep_dims)
s_l = z_l.var(norm_ax, keepdims=keep_dims)
if path_name == 'clean':
# Batch normalization estimates the mean and variance of
# validation and test sets based on the training set
# statistics. The following annotates the computation of
# running average to the graph.
m_l = self.annotate_bn(m_l, gen_id('bn'), 'mean', z_l.shape[0],
output_size, norm_ax)
s_l = self.annotate_bn(s_l, gen_id('bn'), 'var', z_l.shape[0],
output_size, norm_ax)
z = self.join(
(z_l - m_l) / T.sqrt(s_l + np.float32(1e-10)),
(z_u - m) / T.sqrt(s + np.float32(1e-10)))
if noise_std > 0:
z += self.noise_like(z) * noise_std
# z for lateral connection
z_lat = z
b_init, c_init = 0.0, 1.0
b_c_size = output_size[0]
# Add bias
if act_f != 'linear':
z += self.bias(b_init * np.ones(b_c_size), gen_id('b'),
for_conv=len(output_size) > 1)
if is_normalizing:
# Add free parameter (gamma in original Batch Normalization paper)
# if needed by the activation. For instance ReLU doesn't need one
# and we only add it to softmax if hyperparameter top_c is set.
if (act_f not in ['relu', 'leakyrelu', 'linear', 'softmax'] or
(act_f == 'softmax' and self.p.top_c is True)):
c = self.weight(c_init * np.ones(b_c_size), gen_id('c'),
for_conv=len(output_size) > 1)
z *= c
h = self.apply_act(z, act_f)
logger.info(' f%d: %s, %s,%s noise %.2f, params %s, dim %s -> %s' % (
num, layer_type, act_f, ' BN,' if is_normalizing else '',
noise_std, spec[1], in_dim, output_size))
return output_size, z_lat, m, s, h
def f_pool(self, x, spec, in_dim):
layer_type, dims = spec
num_filters = in_dim[0]
if "globalmeanpool" == layer_type:
y, output_size = global_meanpool_2d(x, num_filters)
# scale the variance to match normal conv layers with xavier init
y = y * np.float32(in_dim[-1]) * np.float32(np.sqrt(3))
else:
assert dims[0] != 1 or dims[1] != 1
y, output_size = maxpool_2d(x, in_dim,
poolsize=(dims[1], dims[1]),
poolstride=(dims[0], dims[0]))
return y, output_size
def f_conv(self, x, spec, in_dim, weight_name):
layer_type, dims = spec
num_filters = dims[0]
filter_size = (dims[1], dims[1])
stride = (dims[2], dims[2])
bm = 'full' if 'convf' in layer_type else 'valid'
num_channels = in_dim[0]
W = self.weight(self.rand_init_conv(
(num_filters, num_channels) + filter_size), weight_name)
if stride != (1, 1):
f = GpuCorrMM(subsample=stride, border_mode=bm, pad=(0, 0))
y = f(gpu_contiguous(x), gpu_contiguous(W))
else:
assert self.p.batch_size == self.p.valid_batch_size
y = conv2d(x, W, image_shape=(2*self.p.batch_size, ) + in_dim,
filter_shape=((num_filters, num_channels) +
filter_size), border_mode=bm)
output_size = ((num_filters,) +
ConvOp.getOutputShape(in_dim[1:], filter_size,
stride, bm))
return y, output_size
def g(self, z_lat, z_ver, in_dims, out_dims, l_type, num, fspec, top_g):
f_layer_type, dims = fspec
is_conv = f_layer_type is not None and ('conv' in f_layer_type or
'pool' in f_layer_type)
gen_id = lambda s: '_'.join(['g', str(num), s])
in_dim = np.prod(dtype=floatX, a=in_dims)
out_dim = np.prod(dtype=floatX, a=out_dims)
num_filters = out_dims[0] if is_conv else out_dim
if l_type[-1] in ['0']:
g_type, u_type = l_type[:-1], l_type[-1]
else:
g_type, u_type = l_type, None
# Mapping from layer above: u
if u_type in ['0'] or z_ver is None:
if z_ver is None and u_type not in ['0']:
logger.warn('Decoder %d:%s without vertical input' %
(num, g_type))
u = None
else:
if top_g:
u = z_ver
elif is_conv:
u = self.g_deconv(z_ver, in_dims, out_dims, gen_id('W'), fspec)
else:
W = self.weight(self.rand_init(in_dim, out_dim), gen_id('W'))
u = T.dot(z_ver, W)
# Batch-normalize u
if u is not None:
norm_ax = (0,) if u.ndim <= 2 else (0, -2, -1)
keep_dims = True
u -= u.mean(norm_ax, keepdims=keep_dims)
u /= T.sqrt(u.var(norm_ax, keepdims=keep_dims) +
np.float32(1e-10))
# Define the g function
if not is_conv:
z_lat = z_lat.flatten(2)
bi = lambda inits, name: self.bias(inits * np.ones(num_filters),
gen_id(name), for_conv=is_conv)
wi = lambda inits, name: self.weight(inits * np.ones(num_filters),
gen_id(name), for_conv=is_conv)
if g_type == '':
z_est = None
elif g_type == 'i':
z_est = z_lat
elif g_type in ['sig']:
sigval = bi(0., 'c1') + wi(1., 'c2') * z_lat
if u is not None:
sigval += wi(0., 'c3') * u + wi(0., 'c4') * z_lat * u
sigval = T.nnet.sigmoid(sigval)
z_est = bi(0., 'a1') + wi(1., 'a2') * z_lat + wi(1., 'b1') * sigval
if u is not None:
z_est += wi(0., 'a3') * u + wi(0., 'a4') * z_lat * u
elif g_type in ['lin']:
a1 = wi(1.0, 'a1')
b = bi(0.0, 'b')
z_est = a1 * z_lat + b
elif g_type in ['relu']:
assert u is not None
b = bi(0., 'b')
x = u + b
z_est = self.apply_act(x, 'relu')
elif g_type in ['sigmoid']:
assert u is not None
b = bi(0., 'b')
c = wi(1., 'c')
z_est = self.apply_act((u + b) * c, 'sigmoid')
elif g_type in ['comparison_g2']:
# sig without the uz cross term
sigval = bi(0., 'c1') + wi(1., 'c2') * z_lat
if u is not None:
sigval += wi(0., 'c3') * u
sigval = T.nnet.sigmoid(sigval)
z_est = bi(0., 'a1') + wi(1., 'a2') * z_lat + wi(1., 'b1') * sigval
if u is not None:
z_est += wi(0., 'a3') * u
elif g_type in ['comparison_g3']:
# sig without the sigmoid nonlinearity
z_est = bi(0., 'a1') + wi(1., 'a2') * z_lat
if u is not None:
z_est += wi(0., 'a3') * u + wi(0., 'a4') * z_lat * u
elif g_type in ['comparison_g4']:
# No mixing between z_lat and u before final sum, otherwise similar
# to sig
def nonlin(inp, in_name='input', add_bias=True):
w1 = wi(1., 'w1_%s' % in_name)
b1 = bi(0., 'b1')
w2 = wi(1., 'w2_%s' % in_name)
b2 = bi(0., 'b2') if add_bias else 0
w3 = wi(0., 'w3_%s' % in_name)
return w2 * T.nnet.sigmoid(b1 + w1 * inp) + w3 * inp + b2
z_est = nonlin(z_lat, 'lat') if u is None else \
nonlin(z_lat, 'lat') + nonlin(u, 'ver', False)
elif g_type in ['comparison_g5', 'gauss']:
# Gaussian assumption on z: (z - mu) * v + mu
if u is None:
b1 = bi(0., 'b1')
w1 = wi(1., 'w1')
z_est = w1 * z_lat + b1
else:
a1 = bi(0., 'a1')
a2 = wi(1., 'a2')
a3 = bi(0., 'a3')
a4 = bi(0., 'a4')
a5 = bi(0., 'a5')
a6 = bi(0., 'a6')
a7 = wi(1., 'a7')
a8 = bi(0., 'a8')
a9 = bi(0., 'a9')
a10 = bi(0., 'a10')
mu = a1 * T.nnet.sigmoid(a2 * u + a3) + a4 * u + a5
v = a6 * T.nnet.sigmoid(a7 * u + a8) + a9 * u + a10
z_est = (z_lat - mu) * v + mu
else:
raise NotImplementedError("unknown g type: %s" % str(g_type))
# Reshape the output if z is for conv but u from fc layer
if (z_est is not None and type(out_dims) == tuple and
len(out_dims) > 1 and z_est.ndim < 4):
z_est = z_est.reshape((z_est.shape[0],) + out_dims)
return z_est
def g_deconv(self, z_ver, in_dims, out_dims, weight_name, fspec):
""" Inverse operation for each type of f used in convnets """
f_type, f_dims = fspec
assert z_ver is not None
num_channels = in_dims[0] if in_dims is not None else None
num_filters, width, height = out_dims[:3]
if f_type in ['globalmeanpool']:
u = T.addbroadcast(z_ver, 2, 3)
assert in_dims[1] == 1 and in_dims[2] == 1, \
"global pooling needs in_dims (1,1): %s" % str(in_dims)
elif f_type in ['maxpool']:
# rename to avoid shadowing the builtin 'str'; this depooling
# copies z_ver into every offset of each stride block
sh, stride, size = z_ver.shape, f_dims[0], f_dims[1]
assert stride == size, "depooling requires stride == size"
u = T.zeros((sh[0], sh[1], sh[2] * stride, sh[3] * stride),
dtype=z_ver.dtype)
for x in xrange(stride):
for y in xrange(stride):
u = T.set_subtensor(u[:, :, x::stride, y::stride], z_ver)
u = u[:, :, :width, :height]
elif f_type in ['convv', 'convf']:
filter_size, stride = (f_dims[1], f_dims[1]), f_dims[2]
W_shape = (num_filters, num_channels) + filter_size
W = self.weight(self.rand_init_conv(W_shape), weight_name)
if stride > 1:
# upsample if strided version
sh = z_ver.shape
u = T.zeros((sh[0], sh[1], sh[2] * stride, sh[3] * stride),
dtype=z_ver.dtype)
u = T.set_subtensor(u[:, :, ::stride, ::stride], z_ver)
else:
u = z_ver # no strides, only deconv
u = conv2d(u, W, filter_shape=W_shape,
border_mode='valid' if 'convf' in f_type else 'full')
u = u[:, :, :width, :height]
else:
raise NotImplementedError('Layer %s has no convolutional decoder'
% f_type)
return u
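# For reference, the 'maxpool' branch above amounts to nearest-neighbour
# upsampling followed by a crop; a minimal NumPy sketch of the same
# computation (assuming stride == size, as asserted there):
#
#     import numpy as np
#     def depool(z, stride, width, height):
#         u = np.repeat(np.repeat(z, stride, axis=2), stride, axis=3)
#         return u[:, :, :width, :height]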
|
ryukinkou/ladder_customized
|
ladder_theano_gpu1/ladder.py
|
Python
|
mit
| 25,301
|
[
"Gaussian"
] |
2195938902b29416ca52499ba7223e64078da0e74f9f64e6d79b6e15083a42c0
|
# File generated from python blocks in "doc/atomic-ops.tex"
>>> import sys
>>> HOST = sys.argv[2]
>>> PORT = int(sys.argv[3])
>>> import hyperdex.admin
>>> a = hyperdex.admin.Admin(HOST, PORT)
>>> a.add_space('''
... space friendlists
... key username
... attributes
... string first,
... string last,
... set(string) friends
... ''')
True
>>> import hyperdex.client
>>> c = hyperdex.client.Client(HOST, PORT)
>>> c.put('friendlists', 'jsmith1', {'first': 'John', 'last': 'Smith',
... 'friends': set(['bjones1', 'jd', 'jj'])})
True
>>> c.put('friendlists', 'jd', {'first': 'John', 'last': 'Doe'})
True
>>> c.put('friendlists', 'bjones1', {'first': 'Brian', 'last': 'Jones'})
True
>>> c.get('friendlists', 'jsmith1')
{'first': 'John', 'last': 'Smith', 'friends': set(['bjones1', 'jd', 'jj'])}
>>> c.cond_put('friendlists', 'jsmith1',
... {'first': 'John', 'last': 'Smith'},
... {'first': 'Jon'})
True
>>> c.get('friendlists', 'jsmith1')
{'first': 'Jon', 'last': 'Smith', 'friends': set(['bjones1', 'jd', 'jj'])}
>>> c.cond_put('friendlists', 'jsmith1',
... {'first': 'John', 'last': 'Smith'},
... {'first': 'Jon'})
False
>>> c.cond_put('friendlists', 'jsmith1',
... {'friends': set(['bjones1', 'jd', 'jj'])},
... {'first': 'John'})
True
>>> c.get('friendlists', 'jsmith1')
{'first': 'John', 'last': 'Smith', 'friends': set(['bjones1', 'jd', 'jj'])}
>>> a.add_space('''
... space userlocks
... key username
... ''')
True
>>> c.put_if_not_exist('userlocks', 'jsmith1', {})
True
>>> c.get('userlocks', 'jsmith1')
{}
>>> c.put_if_not_exist('userlocks', 'jsmith1', {})
False
>>> c.delete('userlocks', 'jsmith1')
True
>>> def lock(client, user):
... while not client.put_if_not_exist('userlocks', user, {}):
... pass
>>> def unlock(client, user):
... client.delete('userlocks', user)
>>> lock(c, 'jsmith1')
>>> unlock(c, 'jsmith1')
>>> a.add_space('''
... space alldatatypes
... key k
... attributes
... string s,
... int i,
... float f,
... list(string) ls,
... set(string) ss,
... map(string, string) mss,
... map(string, int) msi''')
True
>>> c.put_if_not_exist('alldatatypes', 'somekey', {'s': 'initial value'})
True
>>> c.put_if_not_exist('alldatatypes', 'somekey', {'s': 'initial value'})
False
>>> # cond_put: the first dict is the predicate and may be any valid search predicate
>>> c.cond_put('alldatatypes', 'somekey', {'s': 'initial value'}, {'s': 'some string'})
True
>>> c.cond_put('alldatatypes', 'somekey', {'s': 'initial value'}, {'s': 'some string'})
False
>>> c.get('alldatatypes', 'somekey')
{'f': 0.0, 'i': 0, 'mss': {}, 'ss': set([]), 's': 'some string', 'ls': [], 'msi': {}}
>>> c.cond_put_or_create('alldatatypes', 'anotherkey', {'s': 'a'}, {'s': 'b'})
True
>>> c.cond_put_or_create('alldatatypes', 'anotherkey', {'s': 'a'}, {'s': 'b'})
False
>>> c.get('alldatatypes', 'anotherkey')
{'f': 0.0, 'i': 0, 'mss': {}, 'ss': set([]), 's': 'b', 'ls': [], 'msi': {}}
>>> c.cond_put_or_create('alldatatypes', 'anotherkey', {'s': 'b'}, {'s': 'a'})
True
>>> c.get('alldatatypes', 'anotherkey')
{'f': 0.0, 'i': 0, 'mss': {}, 'ss': set([]), 's': 'a', 'ls': [], 'msi': {}}
>>> c.atomic_add('alldatatypes', 'somekey', {'i': 1, 'f': 0.25})
True
>>> c.get('alldatatypes', 'somekey')
{'f': 0.25, 'i': 1, 'mss': {}, 'ss': set([]), 's': 'some string', 'ls': [], 'msi': {}}
>>> c.atomic_sub('alldatatypes', 'somekey', {'i': 2, 'f': 0.5})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': -1, 'mss': {}, 'ss': set([]), 's': 'some string', 'ls': [], 'msi': {}}
>>> c.atomic_mul('alldatatypes', 'somekey', {'i': 2, 'f': 4.})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -1.0, 'i': -2, 'mss': {}, 'ss': set([]), 's': 'some string', 'ls': [], 'msi': {}}
>>> c.atomic_div('alldatatypes', 'somekey', {'i': 2, 'f': 4.})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': -1, 'mss': {}, 'ss': set([]), 's': 'some string', 'ls': [], 'msi': {}}
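>>> # atomic_add/sub/mul/div apply the arithmetic server-side as a single
>>> # linearizable step, so two concurrent writers can never interleave a
>>> # read-modify-write race the way a get-then-put sequence could.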
>>> c.put('alldatatypes', 'somekey', {'i': 0xdeadbeefcafe})
True
>>> c.atomic_and('alldatatypes', 'somekey', {'i': 0xffffffff0000})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 244837814042624, 'mss': {}, 'ss': set([]), 's': 'some string', 'ls': [], 'msi': {}}
>>> print "0x%x" % (c.get('alldatatypes', 'somekey')['i'],)
0xdeadbeef0000
>>> c.atomic_or('alldatatypes', 'somekey', {'i': 0x00000000cafe})
True
>>> print "0x%x" % (c.get('alldatatypes', 'somekey')['i'],)
0xdeadbeefcafe
>>> c.atomic_xor('alldatatypes', 'somekey', {'i': 0xdea5a0403af3})
True
>>> print "0x%x" % (c.get('alldatatypes', 'somekey')['i'],)
0x81eaff00d
>>> c.string_prepend('alldatatypes', 'somekey', {'s': '->'})
True
>>> c.get('alldatatypes', 'somekey')['s']
'->some string'
>>> c.string_append('alldatatypes', 'somekey', {'s': '<-'})
True
>>> c.get('alldatatypes', 'somekey')['s']
'->some string<-'
>>> c.put('alldatatypes', 'somekey', {'ls': ['B']})
True
>>> c.list_lpush('alldatatypes', 'somekey', {'ls': 'A'})
True
>>> c.get('alldatatypes', 'somekey')['ls']
['A', 'B']
>>> c.list_rpush('alldatatypes', 'somekey', {'ls': 'C'})
True
>>> c.get('alldatatypes', 'somekey')['ls']
['A', 'B', 'C']
>>> c.set_add('alldatatypes', 'somekey', {'ss': 'C'})
True
>>> c.get('alldatatypes', 'somekey')['ss']
set(['C'])
>>> c.set_remove('alldatatypes', 'somekey', {'ss': 'C'})
True
>>> c.get('alldatatypes', 'somekey')['ss']
set([])
>>> c.set_union('alldatatypes', 'somekey', {'ss': set(['A', 'B', 'C'])})
True
>>> c.get('alldatatypes', 'somekey')['ss']
set(['A', 'C', 'B'])
>>> c.set_intersect('alldatatypes', 'somekey', {'ss': set(['A', 'B', 'Z'])})
True
>>> c.get('alldatatypes', 'somekey')['ss']
set(['A', 'B'])
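>>> # set_add/set_remove operate on a single element, while set_union and
>>> # set_intersect atomically combine the stored set with an entire set
>>> # supplied by the caller.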
>>> c.map_add('alldatatypes', 'somekey', {'mss': {'mapkey': 'mapvalue'}, 'msi': {'mapkey': 16}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 16}}
>>> c.map_add('alldatatypes', 'somekey', {'mss': {'tmp': 'delete me'}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'tmp': 'delete me', 'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 16}}
>>> c.map_remove('alldatatypes', 'somekey', {'mss': 'tmp'})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 16}}
>>> c.map_atomic_add('alldatatypes', 'somekey', {'msi': {'mapkey': 16}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 32}}
>>> c.map_atomic_sub('alldatatypes', 'somekey', {'msi': {'mapkey': -32}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 64}}
>>> c.map_atomic_mul('alldatatypes', 'somekey', {'msi': {'mapkey': 4}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 256}}
>>> c.map_atomic_div('alldatatypes', 'somekey', {'msi': {'mapkey': 64}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 4}}
>>> c.map_atomic_and('alldatatypes', 'somekey', {'msi': {'mapkey': 2}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 0}}
>>> c.map_atomic_or('alldatatypes', 'somekey', {'msi': {'mapkey': 5}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 5}}
>>> c.map_atomic_xor('alldatatypes', 'somekey', {'msi': {'mapkey': 7}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': 'mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 2}}
>>> c.map_string_prepend('alldatatypes', 'somekey', {'mss': {'mapkey': '->'}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': '->mapvalue'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 2}}
>>> c.map_string_append('alldatatypes', 'somekey', {'mss': {'mapkey': '<-'}})
True
>>> c.get('alldatatypes', 'somekey')
{'f': -0.25, 'i': 34874585101, 'mss': {'mapkey': '->mapvalue<-'}, 'ss': set(['A', 'B']), 's': '->some string<-', 'ls': ['A', 'B', 'C'], 'msi': {'mapkey': 2}}
>>> a.add_space('''
... space people
... key k
... attributes
... document info''')
True
>>> Document = hyperdex.client.Document
>>> c.put('people', 'jane', {'info' : Document( {'name': 'Jane Doe', 'gender' : 'female', 'age' : 21, 'likes' : ['cornell', 'python']} )})
True
>>> c.atomic_add('people', 'jane', {'info.age' : 1})
True
>>> c.get('people', 'jane')
{'info': Document({"name": "Jane Doe", "gender": "female", "age": 22, "likes": ["cornell", "python"]})}
>>> c.atomic_add('people', 'jane', {'info.gender' : 1})
False
>>> c.atomic_add('people', 'jane', {'info.children' : 1})
True
>>> c.get('people', 'jane')
{'info': Document({"name": "Jane Doe", "gender": "female", "age": 22, "children": 1, "likes": ["cornell", "python"]})}
>>> c.string_prepend('people', 'jane', {'info.name' : 'Dr. '})
True
>>> c.get('people', 'jane')
{'info': Document({"name": "Dr. Jane Doe", "gender": "female", "age": 22, "children": 1, "likes": ["cornell", "python"]})}
>>> c.string_append('people', 'jane', {'info.name' : ', Jr.'})
True
>>> c.get('people', 'jane')
{'info': Document({"name": "Dr. Jane Doe, Jr.", "gender": "female", "age": 22, "children": 1, "likes": ["cornell", "python"]})}
|
cactorium/HyperDex
|
test/doc.atomic-ops.py
|
Python
|
bsd-3-clause
| 10,154
|
[
"Brian"
] |
48fe3ecbfeb7a4b2f99df0a70d2f3b02dc6c32f6d56847462b283b2924a69dd7
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon. All rights Reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Changes made by James Casbon:
# - New Astral class
# - SQL functionality for both Scop and Astral classes
# - All sunids are int not strings
#
# Code written by Jeffrey Chang to access SCOP over the internet, which
# was previously in Bio.WWW.SCOP, has now been merged into this module.
""" SCOP: Structural Classification of Proteins.
The SCOP database aims to provide a manually constructed classification of
all known protein structures into a hierarchy, the main levels of which
are family, superfamily and fold.
* "SCOP":http://scop.mrc-lmb.cam.ac.uk/scop/
* "Introduction":http://scop.mrc-lmb.cam.ac.uk/scop/intro.html
* "SCOP parsable files":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
The Scop object in this module represents the entire SCOP classification. It
can be built from the three SCOP parsable files, modified if so desired, and
converted back to the same file formats. A single SCOP domain (represented
by the Domain class) can be obtained from Scop using the domain's SCOP
identifier (sid).
nodeCodeDict -- A mapping between known 2 letter node codes and a longer
description. The known node types are 'cl' (class), 'cf'
(fold), 'sf' (superfamily), 'fa' (family), 'dm' (protein),
'sp' (species), 'px' (domain). Additional node types may
be added in the future.
This module also provides code to access SCOP over the WWW.
Functions:
search -- Access the main CGI script.
_open -- Internally used function.
"""
from types import *
import os, re, string  # re is imported explicitly; _domain_re below depends on it
import Des
import Cla
import Hie
from Residues import *
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
nodeCodeDict = { 'cl':'class', 'cf':'fold', 'sf':'superfamily',
'fa':'family', 'dm':'protein', 'sp':'species', 'px':'domain'}
_nodetype_to_code= { 'class': 'cl', 'fold': 'cf', 'superfamily': 'sf',
'family': 'fa', 'protein': 'dm', 'species': 'sp', 'domain': 'px'}
nodeCodeOrder = [ 'ro', 'cl', 'cf', 'sf', 'fa', 'dm', 'sp', 'px' ]
astralBibIds = [10,20,25,30,35,40,50,70,90,95,100]
astralEvs = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,
1e-20, 1e-25, 1e-50]
astralEv_to_file = { 10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50' }
astralEv_to_sql = { 10: 'e1', 5: 'e0_7', 1: 'e0', 0.5: 'e_0_3', 0.1: 'e_1',
0.05: 'e_1_3', 0.01: 'e_2', 0.005: 'e_2_3', 0.001: 'e_3',
1e-4: 'e_4', 1e-5: 'e_5', 1e-10: 'e_10', 1e-15: 'e_15',
1e-20: 'e_20', 1e-25: 'e_25', 1e-50: 'e_50' }
def cmp_sccs(sccs1, sccs2) :
"""Order SCOP concise classification strings (sccs).
a.4.5.1 < a.4.5.11 < b.1.1.1
A sccs (e.g. a.4.5.11) compactly represents a domain's classification.
The letter represents the class, and the numbers are the fold,
superfamily, and family, respectively.
"""
s1 = sccs1.split(".")
s2 = sccs2.split(".")
if s1[0] != s2[0]: return cmp(s1[0], s2[0])
s1 = map(int, s1[1:])
s2 = map(int, s2[1:])
return cmp(s1,s2)
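# Illustrative: cmp_sccs("a.4.5.1", "a.4.5.11") == -1 because the numeric
# fields are compared as integers, and cmp_sccs("a.4.5.11", "b.1.1.1") == -1
# because the class letters are compared first.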
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")
def parse_domain(str) :
"""Convert an ASTRAL header string into a Scop domain.
An ASTRAL (http://astral.stanford.edu/) header contains a concise
description of a SCOP domain. A very similar format is used when a
Domain object is converted into a string. The Domain returned by this
method contains most of the SCOP information, but it will not be located
within the SCOP hierarchy (i.e. The parent node will be None). The
description is composed of the SCOP protein and species descriptions.
A typical ASTRAL header looks like --
>d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
"""
m = _domain_re.match(str)
if (not m) : raise ValueError, "Domain: "+ str
dom = Domain()
dom.sid = m.group(1)
dom.sccs = m.group(2)
dom.residues = Residues(m.group(3))
if not dom.residues.pdbid :
dom.residues.pdbid= dom.sid[1:5]
dom.description = m.group(4).strip()
return dom
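# Illustrative: for the example header in the docstring, parse_domain returns
# a Domain with sid 'd1tpt_1', sccs 'a.46.2.1', residues '1-70' (pdbid filled
# in as '1tpt' from the sid), and description
# 'Thymidine phosphorylase {Escherichia coli}'.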
def _open_scop_file(scop_dir_path, version, filetype) :
filename = "dir.%s.scop.txt_%s" % (filetype,version)
handle = open(os.path.join( scop_dir_path, filename))
return handle
class Scop:
"""The entire SCOP hierarchy.
root -- The root node of the hierarchy
"""
def __init__(self, cla_handle=None, des_handle=None, hie_handle=None,
dir_path=None, db_handle=None, version=None):
"""Build the SCOP hierarchy from the SCOP parsable files, or a sql backend.
If no file handles are given, then a Scop object with a single
empty root node is returned.
If a directory and version are given (with dir_path=.., version=...) or
file handles for each file, the whole scop tree will be built in memory.
If a MySQLdb database handle is given, the tree will be built as needed,
minimising construction times. To build the SQL database, use the methods
write_xxx_sql to create the tables.
"""
self._sidDict = {}
self._sunidDict = {}
if cla_handle==des_handle==hie_handle==dir_path==db_handle==None: return
if dir_path is None and db_handle is None:
if cla_handle == None or des_handle==None or hie_handle==None:
raise RuntimeError,"Need CLA, DES and HIE files to build SCOP"
sunidDict = {}
self.db_handle = db_handle
try:
if db_handle:
# do nothing if we have a db handle, we'll do it all on the fly
pass
else:
# open SCOP parseable files
if dir_path:
if not version :
raise RuntimeError, "Need SCOP version to find parsable files in directory"
if cla_handle or des_handle or hie_handle:
raise RuntimeError,"Cannot specify SCOP directory and specific files"
cla_handle = _open_scop_file( dir_path, version, 'cla')
des_handle = _open_scop_file( dir_path, version, 'des')
hie_handle = _open_scop_file( dir_path, version, 'hie')
root = Node()
domains = []
root.sunid=0
root.type='ro'
sunidDict[root.sunid] = root
self.root = root
root.description = 'SCOP Root'
# Build the rest of the nodes using the DES file
i = Des.Iterator(des_handle, Des.Parser())
while 1 :
rec = i.next()
if rec is None : break
if rec.nodetype =='px' :
n = Domain()
n.sid = rec.name
domains.append(n)
else :
n = Node()
n.sunid = rec.sunid
n.type = rec.nodetype
n.sccs = rec.sccs
n.description = rec.description
sunidDict[n.sunid] = n
# Glue all of the Nodes together using the HIE file
i = Hie.Iterator(hie_handle, Hie.Parser())
while 1 :
rec = i.next()
if rec is None : break
if not sunidDict.has_key(rec.sunid) :
print rec.sunid
n = sunidDict[rec.sunid]
if rec.parent != '' : # Not root node
if not sunidDict.has_key(rec.parent):
raise ValueError, "Incomplete data?"
n.parent = sunidDict[rec.parent]
for c in rec.children:
if not sunidDict.has_key(c) :
raise ValueError, "Incomplete data?"
n.children.append(sunidDict[c])
# Fill in the gaps with information from the CLA file
sidDict = {}
i = Cla.Iterator(cla_handle, Cla.Parser())
while 1 :
rec = i.next()
if rec is None : break
n = sunidDict[rec.sunid]
assert n.sccs == rec.sccs
assert n.sid == rec.sid
n.residues = rec.residues
sidDict[n.sid] = n
# Clean up
self._sunidDict = sunidDict
self._sidDict = sidDict
self._domains = tuple(domains)
finally:
if dir_path :
# If we opened the files, we close the files
if cla_handle : cla_handle.close()
if des_handle : des_handle.close()
if hie_handle : hie_handle.close()
def getRoot(self):
return self.getNodeBySunid(0)
def getDomainBySid(self, sid) :
"""Return a domain from its sid"""
if self._sidDict.has_key(sid):
return self._sidDict[sid]
if self.db_handle:
self.getDomainFromSQL(sid=sid)
if self._sidDict.has_key(sid):
return self._sidDict[sid]
else:
return None
def getNodeBySunid(self, sunid) :
"""Return a node from its sunid"""
if self._sunidDict.has_key(sunid):
return self._sunidDict[sunid]
if self.db_handle:
self.getDomainFromSQL(sunid=sunid)
if self._sunidDict.has_key(sunid):
return self._sunidDict[sunid]
else:
return None
def getDomains(self) :
"""Returns an ordered tuple of all SCOP Domains"""
if self.db_handle:
return self.getRoot().getDescendents('px')
else:
return self._domains
def write_hie(self, handle) :
"""Build an HIE SCOP parsable file from this object"""
nodes = self._sunidDict.values()
# We order nodes to ease comparison with original file
nodes.sort(lambda n1,n2: cmp(n1.sunid, n2.sunid))
for n in nodes :
handle.write(str(n.toHieRecord()))
def write_des(self, handle) :
"""Build a DES SCOP parsable file from this object"""
nodes = self._sunidDict.values()
# Original SCOP file is not ordered?
nodes.sort(lambda n1,n2: cmp(n1.sunid, n2.sunid))
for n in nodes :
if n != self.root :
handle.write(str(n.toDesRecord()))
def write_cla(self, handle) :
"""Build a CLA SCOP parsable file from this object"""
nodes = self._sidDict.values()
# We order nodes to ease comparison with original file
nodes.sort(lambda n1,n2: cmp(n1.sunid, n2.sunid))
for n in nodes :
handle.write(str(n.toClaRecord()))
def getDomainFromSQL(self, sunid=None, sid=None):
"""Load a node from the SQL backend using sunid or sid"""
if sunid==sid==None: return None
cur = self.db_handle.cursor()
if sid:
cur.execute("SELECT sunid FROM cla WHERE sid=%s", sid)
res = cur.fetchone()
if res is None:
return None
sunid = res[0]
cur.execute("SELECT * FROM des WHERE sunid=%s", sunid)
data = cur.fetchone()
if data is not None:
n = None
#determine if Node or Domain
if data[1] != "px":
n = Node(scop=self)
cur.execute("SELECT child FROM hie WHERE parent=%s", sunid)
children = []
for c in cur.fetchall():
children.append(c[0])
n.children = children
else:
n = Domain(scop=self)
cur.execute("select sid, residues, pdbid from cla where sunid=%s",
sunid)
[n.sid,n.residues,pdbid] = cur.fetchone()
n.residues = Residues(n.residues)
n.residues.pdbid=pdbid
self._sidDict[n.sid] = n
[n.sunid,n.type,n.sccs,n.description] = data
if data[1] != 'ro':
cur.execute("SELECT parent FROM hie WHERE child=%s", sunid)
n.parent = cur.fetchone()[0]
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
def getAscendentFromSQL(self, node, type):
"""Get ascendents using SQL backend"""
if nodeCodeOrder.index(type) >= nodeCodeOrder.index(node.type): return None
cur = self.db_handle.cursor()
cur.execute("SELECT "+type+" from cla WHERE "+node.type+"=%s", (node.sunid))
result = cur.fetchone()
if result is not None:
return self.getNodeBySunid(result[0])
else:
return None
def getDescendentsFromSQL(self, node, type):
"""Get descendents of a node using the database backend. This avoids
repeated iteration of SQL calls and is therefore much quicker than
repeatedly calling node.getChildren().
"""
if nodeCodeOrder.index(type) <= nodeCodeOrder.index(node.type): return []
des_list = []
# SQL cla table knows nothing about 'ro'
if node.type == 'ro':
for c in node.getChildren():
for d in self.getDescendentsFromSQL(c,type):
des_list.append(d)
return des_list
cur = self.db_handle.cursor()
if type != 'px':
cur.execute("SELECT DISTINCT des.sunid,des.type,des.sccs,description FROM \
cla,des WHERE cla."+node.type+"=%s AND cla."+type+"=des.sunid", (node.sunid))
data = cur.fetchall()
for d in data:
if not self._sunidDict.has_key(int(d[0])):
n = Node(scop=self)
[n.sunid,n.type,n.sccs,n.description] = d
n.sunid=int(n.sunid)
self._sunidDict[n.sunid] = n
cur.execute("SELECT parent FROM hie WHERE child=%s", n.sunid)
n.parent = cur.fetchone()[0]
cur.execute("SELECT child FROM hie WHERE parent=%s", n.sunid)
children = []
for c in cur.fetchall():
children.append(c[0])
n.children = children
des_list.append( self._sunidDict[int(d[0])] )
else:
cur.execute("SELECT cla.sunid,sid,pdbid,residues,cla.sccs,type,description,sp\
FROM cla,des where cla.sunid=des.sunid and cla."+node.type+"=%s",
node.sunid)
data = cur.fetchall()
for d in data:
if not self._sunidDict.has_key(int(d[0])):
n = Domain(scop=self)
#[n.sunid, n.sid, n.pdbid, n.residues, n.sccs, n.type,
#n.description,n.parent] = data
[n.sunid,n.sid, pdbid,n.residues,n.sccs,n.type,n.description,
n.parent] = d[0:8]
n.residues = Residues(n.residues)
n.residues.pdbid = pdbid
n.sunid = int(n.sunid)
self._sunidDict[n.sunid] = n
self._sidDict[n.sid] = n
des_list.append( self._sunidDict[int(d[0])] )
return des_list
def write_hie_sql(self, handle):
"""Write HIE data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS hie")
cur.execute("CREATE TABLE hie (parent INT, child INT, PRIMARY KEY (child),\
INDEX (parent) )")
for p in self._sunidDict.values():
for c in p.children:
cur.execute("INSERT INTO hie VALUES (%s,%s)" % (p.sunid, c.sunid))
def write_cla_sql(self, handle):
"""Write CLA data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS cla")
cur.execute("CREATE TABLE cla (sunid INT, sid CHAR(8), pdbid CHAR(4),\
residues VARCHAR(50), sccs CHAR(10), cl INT, cf INT, sf INT, fa INT,\
dm INT, sp INT, px INT, PRIMARY KEY (sunid), INDEX (SID) )")
for n in self._sidDict.values():
c = n.toClaRecord()
cur.execute( "INSERT INTO cla VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
(n.sunid, n.sid, c.residues.pdbid, c.residues, n.sccs,
n.getAscendent('cl').sunid, n.getAscendent('cf').sunid,
n.getAscendent('sf').sunid, n.getAscendent('fa').sunid,
n.getAscendent('dm').sunid, n.getAscendent('sp').sunid,
n.sunid ))
def write_des_sql(self, handle):
"""Write DES data to SQL database"""
cur = handle.cursor()
cur.execute("DROP TABLE IF EXISTS des")
cur.execute("CREATE TABLE des (sunid INT, type CHAR(2), sccs CHAR(10),\
description VARCHAR(255),\
PRIMARY KEY (sunid) )")
for n in self._sunidDict.values():
cur.execute( "INSERT INTO des VALUES (%s,%s,%s,%s)",
( n.sunid, n.type, n.sccs, n.description ) )
class Node :
""" A node in the Scop hierarchy
sunid -- SCOP unique identifier. e.g. '14986'
parent -- The parent node
children -- A list of child nodes
sccs -- SCOP concise classification string. e.g. 'a.1.1.2'
type -- A 2 letter node type code. e.g. 'px' for domains
description --
"""
def __init__(self, scop=None) :
"""Create a Node in the scop hierarchy. If a Scop instance is provided to the
constructor, this will be used to look up related references using the SQL
methods. If no instance is provided, it is assumed the whole tree exists
and is connected."""
self.sunid=''
self.parent = None
self.children=[]
self.sccs = ''
self.type =''
self.description =''
self.scop=scop
def __str__(self) :
s = []
s.append(str(self.sunid))
s.append(self.sccs)
s.append(self.type)
s.append(self.description)
return " ".join(s)
def toHieRecord(self):
"""Return an Hie.Record"""
rec = Hie.Record()
rec.sunid = str(self.sunid)
if self.getParent() : #Not root node
rec.parent = str(self.getParent().sunid)
else:
rec.parent = '-'
for c in self.getChildren() :
rec.children.append(str(c.sunid))
return rec
def toDesRecord(self):
"""Return a Des.Record"""
rec = Des.Record()
rec.sunid = str(self.sunid)
rec.nodetype = self.type
rec.sccs = self.sccs
rec.description = self.description
return rec
def getChildren(self):
"""Return a list of children of this Node"""
if self.scop is None:
return self.children
else:
return map ( self.scop.getNodeBySunid, self.children )
def getParent(self):
"""Return the parent of this Node"""
if self.scop is None:
return self.parent
else:
return self.scop.getNodeBySunid( self.parent )
def getDescendents( self, node_type) :
""" Return a list of all decendent nodes of the given type. Node type can a
two letter code or longer description. e.g. 'fa' or 'family'
"""
if _nodetype_to_code.has_key(node_type):
node_type = _nodetype_to_code[node_type]
nodes = [self]
if self.scop:
return self.scop.getDescendentsFromSQL(self,node_type)
while nodes[0].type != node_type:
if nodes[0].type == 'px' : return [] # Fell off the bottom of the hierarchy
child_list = []
for n in nodes:
for child in n.getChildren():
child_list.append( child )
nodes = child_list
return nodes
def getAscendent( self, node_type) :
""" Return the ancenstor node of the given type, or None.Node type can a
two letter code or longer description. e.g. 'fa' or 'family'"""
if _nodetype_to_code.has_key(node_type):
node_type = _nodetype_to_code[node_type]
if self.scop:
return self.scop.getAscendentFromSQL(self,node_type)
else:
n = self
if n.type == node_type: return None
while n.type != node_type:
if n.type == 'ro': return None # Fell off the top of the hierarchy
n = n.getParent()
return n
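# Illustrative: for a domain node d, d.getAscendent('sf') walks the parent
# chain px -> sp -> dm -> fa -> sf and returns the superfamily Node, while
# d.getAscendent('px') returns None because a node is not its own ascendent.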
class Domain(Node) :
""" A SCOP domain. A leaf node in the Scop hierarchy.
sid -- The SCOP domain identifier. e.g. 'd5hbib_'
residues -- A Residue object. It defines the collection
of PDB atoms that make up this domain.
"""
def __init__(self,scop=None) :
Node.__init__(self,scop=scop)
self.sid = ''
self.residues = None
def __str__(self) :
s = []
s.append(self.sid)
s.append(self.sccs)
s.append("("+str(self.residues)+")")
if not self.getParent() :
s.append(self.description)
else :
sp = self.getParent()
dm = sp.getParent()
s.append(dm.description)
s.append("{"+sp.description+"}")
return " ".join(s)
def toDesRecord(self):
"""Return a Des.Record"""
rec = Node.toDesRecord(self)
rec.name = self.sid
return rec
def toClaRecord(self) :
"""Return a Cla.Record"""
rec = Cla.Record()
rec.sid = self.sid
rec.residues = self.residues
rec.sccs = self.sccs
rec.sunid = self.sunid
n = self
while n.sunid != 0: #Not root node
rec.hierarchy.append( (n.type, str(n.sunid)) )
n = n.getParent()
rec.hierarchy.reverse()
return rec
class Astral:
"""Abstraction of the ASTRAL database, which has sequences for all the SCOP domains,
as well as clusterings by percent id or evalue.
"""
def __init__( self, dir_path=None, version=None, scop=None, ind_file=None,
astral_file=None, db_handle=None):
"""
Initialise the astral database.
You must provide either a directory of SCOP files:
dir_path - string, the path to location of the scopseq-x.xx directory
(not the directory itself), and
version -- a version number.
or, a FASTA file:
astral_file - string, a path to a fasta file (which will be loaded in memory)
or, a MYSQL database:
db_handle - a database handle for a MYSQL database containing a table
'astral' with the astral data in it. This can be created
using writeToSQL.
Note that the ind_file argument is deprecated.
"""
if ind_file :
raise RuntimeError, "The ind_file (index file) argument is deprecated"
if astral_file==dir_path==db_handle==None:
raise RunTimeError,"Need either file handle, or (dir_path + version)\
or database handle to construct Astral"
if not scop:
raise RuntimeError, "Must provide a Scop instance to construct"
self.scop = scop
self.db_handle = db_handle
if not astral_file and not db_handle:
if dir_path == None or version == None:
raise RuntimeError, "must provide dir_path and version"
self.version = version
self.path = os.path.join( dir_path, "scopseq-%s" % version)
astral_file = "astral-scopdom-seqres-all-%s.fa" % self.version
astral_file = os.path.join (self.path, astral_file)
if astral_file:
#Build a dictionary of SeqRecord objects in the FASTA file, IN MEMORY
self.fasta_dict = SeqIO.to_dict(SeqIO.parse(open(astral_file), "fasta"))
self.astral_file = astral_file
self.EvDatasets = {}
self.EvDatahash = {}
self.IdDatasets = {}
self.IdDatahash = {}
def domainsClusteredByEv(self,id):
"""get domains clustered by evalue"""
if not self.EvDatasets.has_key(id):
if self.db_handle:
self.EvDatasets[id] = self.getAstralDomainsFromSQL(astralEv_to_sql[id])
else:
if not self.path:
raise RuntimeError, "No scopseq directory specified"
file_prefix = "astral-scopdom-seqres-sel-gs"
filename = "%s-e100m-%s-%s.id" % (file_prefix, astralEv_to_file[id] ,
self.version)
filename = os.path.join(self.path,filename)
self.EvDatasets[id] = self.getAstralDomainsFromFile(filename)
return self.EvDatasets[id]
def domainsClusteredById(self,id):
"""get domains clustered by percent id"""
if not self.IdDatasets.has_key(id):
if self.db_handle:
self.IdDatasets[id] = self.getAstralDomainsFromSQL("id"+str(id))
else:
if not self.path:
raise RuntimeError, "No scopseq directory specified"
file_prefix = "astral-scopdom-seqres-sel-gs"
filename = "%s-bib-%s-%s.id" % (file_prefix, id, self.version)
filename = os.path.join(self.path,filename)
self.IdDatasets[id] = self.getAstralDomainsFromFile(filename)
return self.IdDatasets[id]
def getAstralDomainsFromFile(self,filename=None,file_handle=None):
"""Get the scop domains from a file containing a list of sids"""
if file_handle == filename == None:
raise RuntimeError, "You must provide a filename or handle"
if not file_handle:
file_handle = open(filename)
doms = []
while 1:
line = file_handle.readline()
if not line:
break
line = line.rstrip()
doms.append(line)
if filename:
file_handle.close()
doms = filter( lambda a: a[0]=='d', doms )
doms = map( self.scop.getDomainBySid, doms )
return doms
def getAstralDomainsFromSQL(self, column):
"""Load a set of astral domains from a column in the astral table of a MYSQL
database (which can be created with writeToSQL(...))"""
cur = self.db_handle.cursor()
cur.execute("SELECT sid FROM astral WHERE "+column+"=1")
data = cur.fetchall()
data = map( lambda x: self.scop.getDomainBySid(x[0]), data)
return data
def getSeqBySid(self,domain):
"""get the seq record of a given domain from its sid"""
if self.db_handle is None:
return self.fasta_dict[domain].seq
else:
cur = self.db_handle.cursor()
cur.execute("SELECT seq FROM astral WHERE sid=%s", domain)
return Seq(cur.fetchone()[0])
def getSeq(self,domain):
"""Return seq associated with domain"""
return self.getSeqBySid(domain.sid)
def hashedDomainsById(self,id):
"""Get domains clustered by sequence identity in a dict"""
if not self.IdDatahash.has_key(id):
self.IdDatahash[id] = {}
for d in self.domainsClusteredById(id):
self.IdDatahash[id][d] = 1
return self.IdDatahash[id]
def hashedDomainsByEv(self,id):
"""Get domains clustered by evalue in a dict"""
if not self.EvDatahash.has_key(id):
self.EvDatahash[id] = {}
for d in self.domainsClusteredByEv(id):
self.EvDatahash[id][d] = 1
return self.EvDatahash[id]
def isDomainInId(self,dom,id):
"""Returns true if the domain is in the astral clusters for percent ID"""
return self.hashedDomainsById(id).has_key(dom)
def isDomainInEv(self,dom,id):
"""Returns true if the domain is in the ASTRAL clusters for evalues"""
return self.hashedDomainsByEv(id).has_key(dom)
def writeToSQL(self, db_handle):
"""Write the ASTRAL database to a MYSQL database"""
cur = db_handle.cursor()
cur.execute("DROP TABLE IF EXISTS astral")
cur.execute("CREATE TABLE astral (sid CHAR(8), seq TEXT, PRIMARY KEY (sid))")
for dom in self.fasta_dict.keys():
cur.execute( "INSERT INTO astral (sid,seq) values (%s,%s)",
(dom, self.fasta_dict[dom].seq.data))
for i in astralBibIds:
cur.execute("ALTER TABLE astral ADD (id"+str(i)+" TINYINT)")
for d in self.domainsClusteredById(i):
cur.execute("UPDATE astral SET id"+str(i)+"=1 WHERE sid=%s",
d.sid)
for ev in astralEvs:
cur.execute("ALTER TABLE astral ADD ("+astralEv_to_sql[ev]+" TINYINT)")
for d in self.domainsClusteredByEv(ev):
cur.execute("UPDATE astral SET "+astralEv_to_sql[ev]+"=1 WHERE sid=%s",
d.sid)
def search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds):
"""search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds)
Access search.cgi and return a handle to the results. See the
online help file for an explanation of the parameters:
http://scop.mrc-lmb.cam.ac.uk/scop/help.html
Raises an IOError if there's a network error.
"""
params = {'pdb' : pdb, 'key' : key, 'sid' : sid, 'disp' : disp,
'dir' : dir, 'loc' : loc}
variables = {}
for k in params.keys():
if params[k] is not None:
variables[k] = params[k]
variables.update(keywds)
return _open(cgi, variables)
def _open(cgi, params={}, get=1):
"""_open(cgi, params={}, get=1) -> UndoHandle
Open a handle to SCOP. cgi is the URL for the cgi script to access.
params is a dictionary with the options to pass to it. get is a boolean
that describes whether a GET should be used. Does some
simple error checking, and will raise an IOError if it encounters one.
"""
import urllib
from Bio import File
# Open a handle to SCOP.
options = urllib.urlencode(params)
if get: # do a GET
fullcgi = cgi
if options:
fullcgi = "%s?%s" % (cgi, options)
handle = urllib.urlopen(fullcgi)
else: # do a POST
handle = urllib.urlopen(cgi, options)
# Wrap the handle inside an UndoHandle.
uhandle = File.UndoHandle(handle)
# Should I check for 404? timeout? etc?
return uhandle
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/SCOP/__init__.py
|
Python
|
apache-2.0
| 32,825
|
[
"Biopython"
] |
8f77b0f7c92424fd6a2117ad465d3447cca43451153a5170d6894926e6670c8a
|
#
# bioinformatics 3, winter semester 89
# all code by Mathias Bader
# Modified by Dan Pipe-Mazo for Caltech Ditch Day, but not much.
#
import sys, os, string, random, time, copy, json
from math import sqrt
# gives tk namespace for graphical output
import Tkinter as tk
hub_dict = {
#Hub # Links
"Iron Man" : ["Justin Hammer", "Tony Stark", "Jarvis", "Arc Reactor", "Mandarin", "War Machine", 'Marvel'],
"Mandarin" : ["Orange", "Chinese"],
"Chinese" : ["New Year", "Finger Trap", "Checkers", "Zodiac", "Language"],
"Language" : ["Spanish", "German", "French", "English", "Japanese", "Programming"],
"Programming" : ["Perl", "Java", "C++", "Python", "Fortran", "PHP", "Ruby", "Assembly"],
"Game" : ["Checkers", "Chess", "Video Game", "Basketball", "Baseball", "of Thrones", "Boy", "Theory"],
"Chess" : ["Deep Blue", "Checkmate", "Rook", "Knight", "Bishop", "Pawn", "Checkers"],
"Knight" : ["Duke", "Baron", "Earl", "of the Round Table"],
"Duke" : ["Penn", "Yale", "Harvard", "Princeton", "Stanford", "Caltech", "Basketball"],
"IBM" : ["Watson", "Deep Blue", "Computer", "Silicon Valley"],
"Baseball" : ["Dodgers", "Yankees", "Red Sox", "Angels", "Giants", "Phillies"],
"Arc Reactor" : ["Palladium"],
"Vibranium" : ["Arc Reactor", "Element", "Metal"],
"Element" : ["Palladium", "Oxygen", "Carbon", "Gold", "Iron", "Magnesium","Silicon"],
"Carbon" : ["Dating", "Copy", "Dioxide", "Graphite", "Diamond"],
"Ruby" : ["Red"],
"Pepper" : ["Tony Stark", "Jalapeno", "Bell", "Cayenne", "Green", "Red Hot Chili", "Salt", "Gwyneth Paltrow"],
"Green" : ["Lantern", "Goblin", "Emerald"],
"Gem" : ["Emerald", "Ruby", "Diamond", "Sapphire"],
"Blue" : ["Sapphire"],
"Lantern" : ["Festival", "Flashlight", "Lamp", "Lightbulb", "Torch"],
"Rock Band" : ["Video Game", "Red Hot Chili", "Aerosmith", "Nirvana", "Rolling Stones", "Iron Maiden", "Metallica"],
"Red" : ["Red Sox", "Red Hot Chili", "Apple", "Red Robin"],
"Metal" : ["Vibranium","Gold","Iron", "Palladium", "Metallica", "Iron Maiden"],
"Orange" : ["Apple", "Peach", "Banana", "Watermelon", "Grape"],
"Apple" : ["Silicon Valley", "Macintosh", "Jobs", "Wozniak", "Cupertino", "Computer", "iPhone"],
"California" : ["Cupertino", "State", "Silicon Valley", "Caltech"],
"Silicon Valley": ["Silicon", "Paypal", "Google", "Microsoft", "Tesla", "Cupertino", "Facebook", "Stanford"],
"Movie" : ["Iron Man", "Jobs", "Pixar", "Marvel", "Popcorn", "Trailer", "Director", "Artificial Intelligence"],
"Emma" : ["Stone", "Watson", "Roberts", "Frost"],
"Robert Downey Junior" : ["Roberts", "Iron Man", "Sherlock Holmes", "Tropic Thunder"],
"Watson" : ["Computer", "Sherlock Holmes", "Crick", "Emma", "IBM"],
"Roberts" : ["Dread Pirate", "Emma", "Julia", "John"],
"Julia" : ["Child", "Stiles", "Andrews"],
"Pixar" : ["Jobs", "Cars", "Toy Story", "Finding Nemo", "Monsters, Inc.", "The Incredibles", "Movie"],
"Cars" : ["BMW", "Audi", "Toyota", "Honda", "Mazda", "Tesla"],
"Scientist" : ["Tesla", "Watson", "Einstein", "Edison", "Bell", "Curie", "Darwin", "Peter Parker", "Bruce Banner", "Tony Stark", "Doctor Octopus"],
"Bruce Banner" : ["Hulk"],
"Edison" : ["Lightbulb"],
"Elon Musk" : ["Tesla", "SpaceX", "Paypal", "Hyperloop", "Tony Stark", "Penn"],
"Avengers" : ["Iron Man", "Thor", "Hulk", "Captain America", "Hawkeye", "Black Widow", "Movie", "Marvel", "Zodiac", "Wolverine", "Ultron", "Coulson"],
"Marvel" : ["Avengers", "X-Men", "Spider-Man", "Captain America", "Hulk", "Thor", "Frost"],
"X-Men" : ["Professor X", "Cyclops", "Iceman", "Angel", "Mystique", "Storm", "Wolverine", "Magneto"],
"Color" : ["Green", "Orange", "Red", "Blue", "Yellow", "Purple", "Pink", "Deep Blue", "Magenta", "Teal"],
"Green" : ["Hulk"],
"Computer" : ["Deep Blue", "Macintosh", "Microsoft", "Eniac", "Windows", "Facebook", "Programming"],
"Eniac" : ["Penn", "Fortran"],
"Doctor" : ["Pepper", "Doctor Octopus"],
"Spider-Man" : ["Doctor Octopus", "Avengers", "Peter Parker"],
"Entrepreneur" : ["Tony Stark", "Elon Musk", "Justin Hammer", "Jobs", "Edison", "Bruce Wayne", "Bill Gates"],
"Artificial Intelligence" : ["Jarvis", "Deep Blue", "Google", "Ultron", "Watson"],
"Tony Stark" : ["Stark Tower"],
"SHIELD" : ["Avengers", "Nick Fury", "Maria Hill", "Coulson", "Knight", "Agents of SHIELD"],
"Magic" : ["Mandarin", "Penn and Teller", "Houdini"],
"Penn" : ["Penn and Teller"],
"Dark Knight" : ["Batman", "Knight", "Bruce Wayne"],
"Microsoft" : ["Windows", "Bill Gates"],
"DC" : ["Lantern", "Dark Knight", "Batman", "Marvel"],
"Video Game" : ["Minecraft", "Halo"],
"PHP" : ["Facebook"],
"Cobie Smulders": ["Robin", "Maria Hill", "How I Met Your Mother"],
"Robin" : ["Red Robin", "Batman"],
"Sports" : ["Game", "Baseball", "Basketball", "Football", "Soccer", "Hockey", "Tennis", "Golf"],
"Japan" : ["Baseball", "Japanese", "Samurai", "Sushi"],
"TV Shows" : ["Agents of SHIELD", "How I Met Your Mother", "Breaking Bad", "Mad Men"]
}
center_distance = 10.0 # the distance from the middle of the screen to each border
scaling_factor = 1.0 # the zoom-factor (the smaller, the more surface is shown)
zooming = 0 # is the application zooming right now?
zoom_in_border = 1.0 # limit between graph and screen-border for zooming in
zooming_out = 0
circle_diameter = 20 # the diameter of the node-circles
timestep = 0
thermal_energie = 0.0 # set this to 0.3 or 0.0 to (de)activate thermal_energie
all_energies = [] # list of all energies sorted by time
highest_energy = 0 # the highest energy occurring
energie_change_limit = 0.0000001 # if the energy changes less than this, the process is stopped
velocity_maximum = 0.05
friction = 0.0005 # subtracted from the velocity at each timestep to damp oscillations
show_energies_in_background = 1
status_message = ''
grabed_node = ''
grabed_component = ''
dont_finish_calculating = 1
show_energie_in_background = 1
show_textinformation_in_background = 1
#screen properties
c_width = 1000
c_height = 600
border = 20
filename = "iron_man_data.txt"
if (len(sys.argv) == 2 and sys.argv[1] != ""):
filename = sys.argv[1]
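# Force model used in Graph.calculateStep below (summary for reference):
# repulsion between nodes of the same connected component is 0.01 / r**2.3
# and attraction along each edge is r**2.3 / 100, both applied along the
# unit distance vector; per-axis velocities are capped at velocity_maximum
# and damped by friction each timestep.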
# Class for Nodes
class Node:
def __init__(self, node_id):
self.id = node_id # id (as an integer for example)
self.neighbour_ids = [] # list of the ids of the neighbours
self.degree = 0 # number of neighbours
self.coordinate_x = 0
self.coordinate_y = 0
self.force_coulomb = 0
self.force_harmonic = 0
self.cc_number = 0 # the number of the connected component (0 if not assigned yet)
self.cc_centers = []
self.velocity = [0,0] # instead of repositioning nodes directly, change their velocity to produce inertia
self.movable = 1
def getNeighbours(self):
return self.neighbour_ids
def getDegree(self):
return self.degree
def getId(self):
return self.id
def setNeighbour(self, node_id):
self.neighbour_ids.append(node_id)
self.degree += 1
def deleteNeighbour(self, node_id):
self.neighbour_ids.remove(node_id)
self.degree -= 1
# Class for graph
class Graph:
def __init__(self):
# build an empty graph
self.nodes = [] # list of Node-objects
self.edges = [] # list of tupels (node1-id, node2-id) where node1-id is always smaller than node2-id
self.last_added_id = -1
self.connected_components_count = 0
self.overall_energie = 0
self.overall_energie_difference = 1000
self.calculation_finished = 0
self.nodes_dumped = False
def addNode(self, node_id):
# adds a node to the graph
if node_id == self.last_added_id: return # speed up adding of same ids consecutively
for x in self.nodes:
if x.getId() == node_id:
return
new_node = Node(node_id)
if (node_id == "Iron Man"):
new_node.movable = 0
self.nodes.append(new_node)
self.last_added_id = node_id
def addEdge(self, node_id_1, node_id_2):
# adds an edge between two nodes
if node_id_1 != node_id_2 and node_id_1 >= 0 and node_id_2 >= 0 and not self.isEdge(node_id_1, node_id_2):
if node_id_1 < node_id_2:
self.edges.append((node_id_1, node_id_2))
else:
self.edges.append((node_id_2, node_id_1))
# search for the two node-objects with fitting ids
node1 = self.getNode(node_id_1)
node2 = self.getNode(node_id_2)
node1.setNeighbour(node_id_2)
node2.setNeighbour(node_id_1)
def deleteEdge(self, (node_id_1, node_id_2)):
# deletes the edge between node_id_1 and node_id_2
if node_id_1 > node_id_2:
# switch the two node-ids (edges are always saved with smaller id first)
tmp = node_id_1
node_id_1 = node_id_2
node_id_2 = tmp
self.edges.remove((node_id_1, node_id_2))
node1 = self.getNode(node_id_1)
node1.deleteNeighbour(node_id_2)
node2 = self.getNode(node_id_2)
node2.deleteNeighbour(node_id_1)
def nodesList(self):
# returns the list of ids of nodes
list_of_ids = []
for node in self.nodes:
list_of_ids.append(node.id)
return list_of_ids
def edgesList(self):
# returns the list of edges ([(id, id), (id, id), ...]
return self.edges
def degreeList(self):
# returns a dictionary with the degree distribution of the graph
degrees = {}
for x in self.nodes:
if degrees.has_key(x.degree):
degrees[x.degree] += 1
else:
degrees[x.degree] = 1
return degrees
def countNodes(self):
# prints the number of nodes
return len(self.nodes)
def countEdges(self):
# prints the number of nodes
return len(self.edges)
def printNodes(self):
# prints the list of nodes
to_print = '['
count = 0
for x in self.nodes:
to_print = to_print + str(x.getId()) + ','
count += 1
if count > 200:
print to_print,
to_print = ''
count = 1
if count > 0: to_print = to_print[:-1]
to_print = to_print + ']'
print to_print
def printEdges(self):
# prints the list of edges
to_print = '['
count = 0
for (n1, n2) in self.edges:
to_print = to_print + '(' + str(n1) + ',' + str(n2) + '), '
count += 1
if count > 200:
print to_print,
to_print = ''
count = 1
if count > 0: to_print = to_print[:-2]
to_print = to_print + ']'
print to_print
def printData(self):
# prints number of nodes and edges
print 'graph with', len(self.nodes), 'nodes and', len(self.edges), 'edges'
print
for node in self.nodes:
print 'x coordinate of', node.id, 'is', node.coordinate_x
print 'y coordinate of', node.id, 'is', node.coordinate_y
print
def isEdge(self, node_id_1, node_id_2):
if node_id_1 > node_id_2:
# switch the two node-ids (edges are always saved with smaller id first)
tmp = node_id_1
node_id_1 = node_id_2
node_id_2 = tmp
# checks if there is an edge between two nodes
for x in self.edges:
if x == (node_id_1, node_id_2): return True
return False
def getNode(self, node_id):
# returns the node for a given id
for x in self.nodes:
if x.getId() == node_id:
return x
def getNodes(self):
return self.nodes
def SetRandomNodePosition(self):
# sets random positions for all nodes
for node in self.nodes:
if (node.id != "Iron Man"):
node.coordinate_x = random.random() * center_distance - (center_distance/2)
node.coordinate_y = random.random() * center_distance - (center_distance/2)
def paintGraph(self):
# (re)Paints the graph on the surface of the window
# clear the screen
for c_item in c.find_all():
c.delete(c_item)
# plot the energy vs. time in the background of the window
if show_energie_in_background == 1:
if show_energies_in_background == 1:
global all_energies
energies_count = len(all_energies)
# only show the last 200 energies at maximum
if energies_count > 200:
start_point = energies_count - 200
else:
start_point = 0
for i in range(start_point, energies_count):
c.create_rectangle(border+(c_width)/(energies_count-start_point)*(i-start_point), border+c_height-(c_height/highest_energy*all_energies[i]), border + (c_width)/(energies_count-start_point)+(c_width)/(energies_count-start_point)*(i-start_point), c_height+border, fill="#eee", outline="#ddd")
# draw the coordinate system with the center
c.create_line (border, c_height/2+border, (c_width+border), c_height/2+border, fill="#EEEEEE")
c.create_line (c_width/2+border, border, c_width/2+border, c_height+border*2+border, fill="#EEEEEE")
# Output info via text
if show_textinformation_in_background == 1:
# opened file
c.create_text(20, 40, anchor=tk.SW, text=str('opened file: ' +filename), font=("Helvectica", "10"), fill="#AAAAAA")
# timestep
c.create_text(20, 60, anchor=tk.SW, text=str('timestep: ' +str(timestep)), font=("Helvectica", "10"), fill="#AAAAAA")
# overall energie
c.create_text(20, 80, anchor=tk.SW, text=str('overall energie: ' +str(self.overall_energie)), font=("Helvectica", "10"), fill="#AAAAAA")
c.create_text(20, 100, anchor=tk.SW, text=str('overall energie difference: ' +str(self.overall_energie_difference)), font=("Helvectica", "10"), fill="#AAAAAA")
# number of components if more than one
if self.connected_components_count > 1:
c.create_text(20, 125, anchor=tk.SW, text=str('number of connected components: ' + str(self.connected_components_count)), font=("Helvectica", "14"), fill="#AAAAAA")
# thermal_energie if there is still
if thermal_energie > 0:
c.create_text(20, 160, anchor=tk.SW, text=str('thermal energie: ' +str(thermal_energie)), font=("Helvectica", "20"), fill="#AAAAAA")
# Calculation finished-message
if self.calculation_finished:
c.create_text(550, 60, anchor=tk.SW, text=str('Calculation finished after ' + str(timestep) + ' steps'), font=("Helvectica", "20"), fill="#000")
# status message on the bottom of the screen
if status_message != '':
c.create_text(20, c_height, anchor=tk.SW, text=str(status_message), font=("Helvectica", "12"), fill="#000")
# Show 'Now zooming out' if it is zoomed right now
if zooming > 0:
# Detect correct color for fade-out effect
if zooming >= 40:
color_string = "AAAAAA"
if zooming >=30 and zooming < 40:
color_string = "BBBBBB"
if zooming >=20 and zooming < 30:
color_string = "CCCCCC"
if zooming >=10 and zooming < 20:
color_string = "DDDDDD"
if zooming >= 1 and zooming < 10:
color_string = "EEEEEE"
if zooming_out == 1:
c.create_text(c_width/12+border, c_height/2+border, anchor=tk.SW, text=str('Now zooming out'), fill="#" + color_string, font=("Helvectica", "40"))
else:
c.create_text(c_width/12+border, c_height/2+border, anchor=tk.SW, text=str('Now zooming in'), fill="#" + color_string, font=("Helvectica", "40"))
# DRAW AlL EDGES OF THE GRAPH
for node in g.getNodes():
# calculate position of this node
x0 = ((node.coordinate_x*scaling_factor + (center_distance/2)) / center_distance * c_width) + border
y0 = ((node.coordinate_y*scaling_factor + (center_distance/2)) / center_distance * c_height) + border
# draw all the edges to neighbors of this node
for neighbor_id in node.neighbour_ids:
node2 = self.getNode(neighbor_id)
if (node.id > node2.id):
x1 = ((node2.coordinate_x*scaling_factor + (center_distance/2)) / center_distance * c_width) + border
y1 = ((node2.coordinate_y*scaling_factor + (center_distance/2)) / center_distance * c_height) + border
c.create_line (x0 + circle_diameter*scaling_factor / 2, y0 + circle_diameter*scaling_factor / 2, x1 + circle_diameter*scaling_factor / 2, y1 + circle_diameter*scaling_factor / 2)
# DRAW AlL NODES OF THE GRAPH
for node in g.getNodes():
# calculate position of this node
x0 = ((node.coordinate_x*scaling_factor + (center_distance/2)) / center_distance * c_width) + border
y0 = ((node.coordinate_y*scaling_factor + (center_distance/2)) / center_distance * c_height) + border
# draw this node
fill_color = "AAA"
if (node.cc_number <= 5):
if (node.cc_number == 1):
fill_color = "0C0" # green
if (node.cc_number == 2):
fill_color = "00C" # blue
if (node.cc_number == 3):
fill_color = "C00" # red
if (node.cc_number == 4):
fill_color = "FF2" # yellow
if (node.cc_number == 5):
fill_color = "FFB63D" # orange
if node.movable == 1:
c.create_oval(x0, y0, x0 + circle_diameter*scaling_factor, y0 + circle_diameter*scaling_factor, fill="#" + fill_color)
else:
c.create_oval(x0, y0, x0 + circle_diameter*scaling_factor, y0 + circle_diameter*scaling_factor, fill="#000")
else:
if (node.cc_number == 6):
fill_color = "FF2" # yellow
if (node.cc_number == 7):
fill_color = "00C" # blue
if (node.cc_number == 8):
fill_color = "C00" # red
if (node.cc_number == 9):
fill_color = "0C0" # green
if node.movable == 1:
c.create_rectangle(x0, y0, x0 + circle_diameter*scaling_factor, y0 + circle_diameter*scaling_factor, fill="#" + fill_color)
else:
c.create_rectangle(x0, y0, x0 + circle_diameter*scaling_factor, y0 + circle_diameter*scaling_factor, fill="#000")
# write the id under the node
c.create_text(x0, y0 + circle_diameter*scaling_factor + 20, anchor=tk.SW, text=str(node.id))
# c.create_text(x0, y0 + circle_diameter*scaling_factor + 40, anchor=tk.SW, text=str(node.cc_number), fill="#008800")
root.protocol("WM_DELETE_WINDOW", root.destroy)
root.update()
#Dump the nodes to disk, with coordinates
if ( (timestep > 2000) and (self.nodes_dumped == False) ):
self.dumpNodes()
self.nodes_dumped = True
# This function dumps the nodes of the graph into a file
def dumpNodes(self):
# This is the amount to scale the coordinates by for the ddserver
dd_scale = 2
# Make a list of all of the nodes
node_list = []
max_x = 0
max_y = 0
min_x = 0
min_y = 0
for node in self.nodes:
node_dict = {}
# Get all of the basic info
node_dict["x"] = (((node.coordinate_x*scaling_factor)/dd_scale) * c_width)
node_dict["y"] = (((node.coordinate_y*scaling_factor)/dd_scale) * c_height)
node_dict["word"] = node.id
node_dict["links"] = node.neighbour_ids
node_dict["hue"] = random.randint(0, 360)
node_dict["sat"] = random.randint(20, 80)
node_dict["light"] = random.randint(20, 60)
# And now copy the dictionary into the list. If it's
# Iron Man, put it in first
if (node.id == "Iron Man"):
node_list.insert(0, copy.deepcopy(node_dict))
else:
node_list.append(copy.deepcopy(node_dict))
# Update the max and mins
if (node_dict["x"] > max_x):
max_x = node_dict["x"]
if (node_dict["y"] > max_y):
max_y = node_dict["y"]
if (node_dict["x"] < min_x):
min_x = node_dict["x"]
if (node_dict["y"] < min_y):
min_y = node_dict["y"]
# And dump the json to disk
outfile = open("jsons.dta", 'w')
outfile.write(json.dumps(node_list))
outfile.close()
# And report the max and min pixel coordinates
print "Max x: {}, Max y: {}, Min x: {}, Min y: {}".format(max_x, max_y, min_x, min_y)
def calculateStep(self):
new_overall_energie = 0
# calculate the repulsive force for each node
for node in self.nodes:
node.force_coulomb = [0,0]
for node2 in self.nodes:
if (node.id != node2.id) and (node.cc_number == node2.cc_number):
distance_x = node.coordinate_x - node2.coordinate_x
distance_y = node.coordinate_y - node2.coordinate_y
radius = sqrt(distance_x*distance_x + distance_y*distance_y)
if radius != 0:
vector = [distance_x/radius, distance_y/radius]
node.force_coulomb[0] += 0.01 * vector[0] / radius**2.3
node.force_coulomb[1] += 0.01 * vector[1] / radius**2.3
# # Make bigger nodes more repulsive if not attached to it
# if (node2 not in node.neighbour_ids):
# node.force_coulomb[0] *= (1 + len(node2.neighbour_ids)/10)
# node.force_coulomb[0] *= (1 + len(node2.neighbour_ids)/10)
# add this force to the overall energy
new_overall_energie += 0.01 / radius**2.3
else:
# if the nodes lie on each other, randomly displace them a bit
node.force_coulomb[0] += random.random() - 0.5
node.force_coulomb[1] += random.random() - 0.5
# calculate the attractive force for each node, make the
# number of connections for the node weigh heavier
for node in self.nodes:
node.force_harmonic = [0,0]
for neighbor_id in node.neighbour_ids:
node2 = self.getNode(neighbor_id)
distance_x = node.coordinate_x - node2.coordinate_x
distance_y = node.coordinate_y - node2.coordinate_y
radius = sqrt(distance_x*distance_x + distance_y*distance_y)
if radius != 0:
vector = [distance_x/radius* -1, distance_y/radius * -1]
force_harmonic_x = (vector[0] *radius**2.3 )/100
force_harmonic_y = (vector[1] *radius**2.3 )/100
else:
# if the nodes lie on each other, randomly displace them a bit
force_harmonic_x = random.random() - 0.5
force_harmonic_y = random.random() - 0.5
node.force_harmonic[0] += force_harmonic_x
node.force_harmonic[1] += force_harmonic_y
# add this force to the overall energy
new_overall_energie += (radius**2.3)/100
# calculate the difference between the old and new overall energy
self.overall_energie_difference = self.overall_energie - new_overall_energie
self.overall_energie = new_overall_energie
all_energies.append(self.overall_energie)
global highest_energy
if self.overall_energie > highest_energy:
highest_energy = self.overall_energie
if not dont_finish_calculating:
if (self.overall_energie_difference < energie_change_limit and self.overall_energie_difference > -1*energie_change_limit):
self.calculation_finished = 1
# set the new position influenced by the force
global thermal_energie
if timestep == 50 and thermal_energie > 0:
thermal_energie = 0.2
if timestep == 110 and thermal_energie > 0:
thermal_energie = 0.1
if timestep == 150 and thermal_energie > 0:
thermal_energie = 0.0
for node in self.nodes:
(force_coulomb_x, force_coulomb_y) = node.force_coulomb
(force_harmonic_x, force_harmonic_y) = node.force_harmonic
# node.coordinate_x += force_coulomb_x + force_harmonic_x
# node.coordinate_y += force_coulomb_y + force_harmonic_y
node.velocity[0] += (force_coulomb_x + force_harmonic_x)*0.1
node.velocity[1] += (force_coulomb_y + force_harmonic_y)*0.1
# ensure maximum velocity
if (node.velocity[0] > velocity_maximum):
node.velocity[0] = velocity_maximum
if (node.velocity[1] > velocity_maximum):
node.velocity[1] = velocity_maximum
if (node.velocity[0] < -1*velocity_maximum):
node.velocity[0] = -1*velocity_maximum
if (node.velocity[1] < -1*velocity_maximum):
node.velocity[1] = -1*velocity_maximum
# get friction into play
if node.velocity[0] > friction:
node.velocity[0] -= friction
if node.velocity[0] < -1*friction:
node.velocity[0] += friction
if node.velocity[1] > friction:
node.velocity[1] -= friction
if node.velocity[1] < -1*friction:
node.velocity[1] += friction
# FINALLY SET THE NEW POSITION
if node.id != grabed_node or node.cc_number == grabed_component:
if node.movable == 1:
node.coordinate_x += node.velocity[0]
node.coordinate_y += node.velocity[1]
if thermal_energie > 0:
if node.movable == 1:
node.coordinate_x += random.random()*thermal_energie*2-thermal_energie
node.coordinate_y += random.random()*thermal_energie*2-thermal_energie
# calculate centers for all connected components
min_max = []
center = []
for i in range(0, self.connected_components_count):
min_max.append([1000,1000,-1000,-1000])
for i in range(0, self.connected_components_count):
for node in self.getNodes():
if node.cc_number == i+1:
if node.coordinate_x < min_max[i][0]:
min_max[i][0] = node.coordinate_x
if node.coordinate_y < min_max[i][1]:
min_max[i][1] = node.coordinate_y
if node.coordinate_x > min_max[i][2]:
min_max[i][2] = node.coordinate_x
if node.coordinate_y > min_max[i][3]:
min_max[i][3] = node.coordinate_y
center.append([min_max[i][0] + (min_max[i][2] - min_max[i][0])/2, min_max[i][1] + (min_max[i][3] - min_max[i][1])/2])
# if two components lie on each other, increase the distance between those
for a in range(0, self.connected_components_count):
for b in range(0, self.connected_components_count):
# if a != b and center[a][0] > min_max[b][0] and center[a][0] < min_max[b][2] and center[a][1] > min_max[b][1] and center[a][1] < min_max[b][3]:
if a != b:
distance = 1
if ((min_max[a][0]+distance > min_max[b][0] and min_max[a][0]-distance < min_max[b][2]) or (min_max[a][2]+distance > min_max[b][0] and min_max[a][2]-distance < min_max[b][2])) and ((min_max[a][1]+distance > min_max[b][1] and min_max[a][1]-distance < min_max[b][3]) or (min_max[a][3]+distance > min_max[b][1] and min_max[a][3]-distance < min_max[b][3])):
# calculate replacement with help of the distance vector
# of the centers
distance_x = center[a][0] - center[b][0]
distance_y = center[a][1] - center[b][1]
radius = sqrt(distance_x*distance_x + distance_y*distance_y)
replacement = [distance_x/radius* -1, distance_y/radius * -1]
replacement[0] *= random.random() * -0.1
replacement[1] *= random.random() * -0.1
for node in self.nodes:
if node.cc_number == a+1:
if node.id != grabed_node:
if node.movable == 1:
node.coordinate_x += replacement[0]
node.coordinate_y += replacement[1]
# calculate the center of the graph and reposition all nodes so that
# the center becomes (0,0)
x_max = -1000
x_min = 1000
y_max = -1000
y_min = 1000
for node in self.getNodes():
if node.coordinate_x < x_min:
x_min = node.coordinate_x
if node.coordinate_x > x_max:
x_max = node.coordinate_x
if node.coordinate_y < y_min:
y_min = node.coordinate_y
if node.coordinate_y > y_max:
y_max = node.coordinate_y
center_x = x_min + (x_max - x_min)/2
center_y = y_min + (y_max - y_min)/2
for node in self.getNodes():
if node.id != grabed_node:
if (node.movable == 1):
node.coordinate_x -= center_x
node.coordinate_y -= center_y
global scaling_factor
global zooming
global zooming_out
scale = 0
# prevent nodes from leaving the screen - ZOOM OUT
if (x_min < (center_distance/scaling_factor/-2)) or (y_min < (center_distance/scaling_factor/-2)) or (x_max > (center_distance/scaling_factor/2)):
scale = 1
# longer if-statement because node-caption is included
if (y_max > (center_distance/scaling_factor/2)-((circle_diameter+20)*scaling_factor*center_distance/c_height)):
scale = 1
# zoom back in if necessary - ZOOM IN
if (x_min - zoom_in_border > (center_distance/scaling_factor/-2)) and (y_min - zoom_in_border > (center_distance/scaling_factor/-2)) and (x_max + zoom_in_border < (center_distance/scaling_factor/2)) and (y_max + zoom_in_border < (center_distance/scaling_factor/2)-((circle_diameter+10)*scaling_factor*center_distance/c_height)):
scale = -1
if scale == 1:
# zoom out
scaling_factor = scaling_factor * 0.99
zooming = 50
zooming_out = 1
elif scale == -1:
# zoom in
scaling_factor = scaling_factor * 1.01
zooming = 50
zooming_out = 0
else:
# don't zoom (count down the fading for the zooming message)
if zooming > 0:
zooming -= 1
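# A minimal sketch (never called) of the pairwise force model used in
# calculateStep above: the 0.01/100 scale factors and the 2.3 exponent
# mirror the constants hard-coded in that method; the helper name is our
# own and purely illustrative.
def _pairwise_forces_sketch(p1, p2):
    # p1, p2: (x, y) tuples; returns (repulsive, attractive) magnitudes
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    r = sqrt(dx*dx + dy*dy)
    if r == 0:
        return (0.0, 0.0)
    repulsive = 0.01 / r**2.3    # Coulomb-like term, pushes nodes apart
    attractive = (r**2.3) / 100  # harmonic-like term, pulls neighbours together
    return (repulsive, attractive)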
def calculateConnectedComponents(self):
# calculate the connected components of the graph
all_node_ids = []
for node in self.nodes:
all_node_ids.append(node.id)
visited_node_ids = []
node_ids_to_process = []
connected_component_number = 0
while len(all_node_ids) > 0:
# take an anchor node
node_ids_to_process.append(all_node_ids.pop())
connected_component_number += 1
# process all nodes that are reachable from the anchor-node
while len(node_ids_to_process) > 0:
anchor_node_id = node_ids_to_process.pop()
# set the anchors cc_number and add all neighbors to the process
# list that haven't been yet
anchor_node = self.getNode(anchor_node_id)
anchor_node.cc_number = connected_component_number
for neighbor_node_id in anchor_node.neighbour_ids:
if not neighbor_node_id in visited_node_ids:
node_ids_to_process.append(neighbor_node_id)
if neighbor_node_id in all_node_ids:
all_node_ids.remove(neighbor_node_id)
# this node is finished
visited_node_ids.append(anchor_node_id)
self.connected_components_count = connected_component_number
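# For comparison, the same flood-fill idea expressed over a plain
# adjacency dict, independent of the Node/Graph classes (a standalone
# sketch; the function name and signature are our own, and it is not
# called anywhere in this script):
def _connected_components_sketch(adjacency):
    # adjacency: dict mapping node id -> list of neighbour ids
    # returns: dict mapping node id -> 1-based component number
    component_of = {}
    number = 0
    for start in adjacency:
        if start in component_of:
            continue
        number += 1
        stack = [start]
        while stack:
            current = stack.pop()
            if current in component_of:
                continue
            component_of[current] = number
            stack.extend(adjacency.get(current, []))
    return component_of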
def empty(self):
self.clear()
def clear(self):
# deletes all nodes and edges in the graph
self.nodes = []
self.edges = []
# these events handle the interaction with the mouse
def event_button1_pressed(event):
# recalculate the position into node-coordinates
coordinate_x0 = ((event.x - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y0 = ((event.y - border) * center_distance / c_height - center_distance/2) / scaling_factor
coordinate_x1 = (((event.x + circle_diameter*scaling_factor) - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y1 = (((event.y + circle_diameter*scaling_factor) - border) * center_distance / c_height - center_distance/2) / scaling_factor
for node in g.nodes:
if node.coordinate_x > coordinate_x0 and node.coordinate_x < coordinate_x1 and node.coordinate_y > coordinate_y0 and node.coordinate_y < coordinate_y1:
global grabed_node
grabed_node = node.id
def event_button1_motion(event):
# recalculate the position into node-coordinates
coordinate_x0 = ((event.x - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y0 = ((event.y - border) * center_distance / c_height - center_distance/2) / scaling_factor
coordinate_x1 = (((event.x + circle_diameter*scaling_factor) - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y1 = (((event.y + circle_diameter*scaling_factor) - border) * center_distance / c_height - center_distance/2) / scaling_factor
global grabed_node
if grabed_node == '':
for node in g.nodes:
if node.coordinate_x > coordinate_x0 and node.coordinate_x < coordinate_x1 and node.coordinate_y > coordinate_y0 and node.coordinate_y < coordinate_y1:
grabed_node = node.id
else:
node = g.getNode(grabed_node)
distance_x = coordinate_x0 - node.coordinate_x
distance_y = coordinate_y0 - node.coordinate_y
node.coordinate_x = coordinate_x0
node.coordinate_y = coordinate_y0
radius = sqrt(distance_x*distance_x + distance_y*distance_y)
if radius != 0:
node.velocity = [distance_x, distance_y]
# ensure maximum velocity
if (node.velocity[0] > velocity_maximum):
node.velocity[0] = velocity_maximum
if (node.velocity[1] > velocity_maximum):
node.velocity[1] = velocity_maximum
if (node.velocity[0] < -1*velocity_maximum):
node.velocity[0] = -1*velocity_maximum
if (node.velocity[1] < -1*velocity_maximum):
node.velocity[1] = -1*velocity_maximum
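# The screen-to-world transform inlined four times in each handler can be
# read as one helper (illustrative only; the handlers keep the inlined
# form, and this function is never called):
def _screen_to_world_sketch(pixel, canvas_extent):
    # pixel: event.x or event.y; canvas_extent: c_width or c_height
    return ((pixel - border) * center_distance / canvas_extent
            - center_distance/2) / scaling_factor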
def event_button1_released(event):
global status_message
global grabed_node
grabed_node = ''
def event_button3_pressed(event):
# recalculate the position into node-coordinates
coordinate_x0 = ((event.x - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y0 = ((event.y - border) * center_distance / c_height - center_distance/2) / scaling_factor
coordinate_x1 = (((event.x + circle_diameter*scaling_factor) - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y1 = (((event.y + circle_diameter*scaling_factor) - border) * center_distance / c_height - center_distance/2) / scaling_factor
for node in g.nodes:
if node.coordinate_x > coordinate_x0 and node.coordinate_x < coordinate_x1 and node.coordinate_y > coordinate_y0 and node.coordinate_y < coordinate_y1:
global grabed_component
global grabed_node
grabed_component = node.cc_number
grabed_node = node.id
def event_button3_motion(event):
# recalculate the position into node-coordinates
coordinate_x0 = ((event.x - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y0 = ((event.y - border) * center_distance / c_height - center_distance/2) / scaling_factor
coordinate_x1 = (((event.x + circle_diameter*scaling_factor) - border) * center_distance / c_width - center_distance/2) / scaling_factor
coordinate_y1 = (((event.y + circle_diameter*scaling_factor) - border) * center_distance / c_height - center_distance/2) / scaling_factor
global grabed_component
global grabed_node
if grabed_component == '' and grabed_node == '':
for node in g.nodes:
if node.coordinate_x > coordinate_x0 and node.coordinate_x < coordinate_x1 and node.coordinate_y > coordinate_y0 and node.coordinate_y < coordinate_y1:
grabed_component = node.cc_number
grabed_node = node.id
else:
# calculate the position-difference for the grabed node
node = g.getNode(grabed_node)
distance_x = coordinate_x0 - node.coordinate_x
distance_y = coordinate_y0 - node.coordinate_y
for node in g.nodes:
if node.cc_number == grabed_component:
node.coordinate_x += distance_x
node.coordinate_y += distance_y
radius = sqrt(distance_x*distance_x + distance_y*distance_y)
if radius != 0:
node.velocity = [distance_x, distance_y]
# ensure maximum velocity
if (node.velocity[0] > velocity_maximum):
node.velocity[0] = velocity_maximum
if (node.velocity[1] > velocity_maximum):
node.velocity[1] = velocity_maximum
if (node.velocity[0] < -1*velocity_maximum):
node.velocity[0] = -1*velocity_maximum
if (node.velocity[1] < -1*velocity_maximum):
node.velocity[1] = -1*velocity_maximum
def event_button3_released(event):
global status_message
global grabed_component
global grabed_node
grabed_component = ''
grabed_node = ''
def key_pressed(event):
global grabed_node
global grabed_component
if event.char == ' ':
if grabed_node != '':
if grabed_component == '':
# change movable-state of this node
node = g.getNode(grabed_node)
node.movable = 1 - node.movable
else:
# change movable-state of this component
node_movable = g.getNode(grabed_node).movable
print node_movable
for node in g.nodes:
if node.cc_number == grabed_component:
node.movable = 1 - node_movable
if event.char == 'e':
global show_energie_in_background
show_energie_in_background = 1 - show_energie_in_background
if event.char == 'i':
global show_textinformation_in_background
show_textinformation_in_background = 1 - show_textinformation_in_background
# read lines of file with graph data
# print
# print 'reading file', filename, 'with graph data ...'
# try:
# f1 = open(filename, 'r')
# rows_graph = f1.readlines()
# f1.close()
# except IOError:
# print filename, 'could not be opened'
# sys.exit(1)
# parse lines and build graph
print 'creating graph ...'
g = Graph()
# find the line where the graph starts
for hub in hub_dict.keys():
# Put the key in as a node
g.addNode(hub)
connections = hub_dict[hub]
for word in connections:
g.addNode(word)
g.addEdge(hub, word)
# calculate the connected components:
g.calculateConnectedComponents()
# set the position of all nodes in the graph randomly to
# a number between 0 and 10
g.SetRandomNodePosition()
# create the window object for painting the graph on
root = tk.Tk()
# make it cover the entire screen
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (w, h))
c_width = w - border*2
c_height = h - border*2
root.title("Force directed layout of graphs (by Mathias Bader) - version 0.1")
c = tk.Canvas(root, width=c_width+2*border, height=c_height+2*border, bg='white')
# left-click
c.bind("<Button-1>", event_button1_pressed)
c.bind("<B1-Motion>", event_button1_motion)
c.bind("<ButtonRelease-1>", event_button1_released)
# right-click
c.bind("<Button-3>", event_button3_pressed)
c.bind("<B3-Motion>", event_button3_motion)
c.bind("<ButtonRelease-3>", event_button3_released)
# keyboard key
c.bind("<Key>", key_pressed)
c.pack()
c.focus_set()
g.paintGraph()
while (not g.calculation_finished or dont_finish_calculating):
g.calculateStep()
timestep += 1
g.paintGraph()
g.paintGraph()
c.mainloop()
|
sean9keenan/WordWeb
|
generator/force_directed_graph_layout.py
|
Python
|
mit
| 37,691
|
[
"Octopus"
] |
1b9ba121e407d102ffbc6b71f8ffbf073214754e813c3a49b473622d5e4d5cd9
|
"""
intro_3_05_variable_names_2.py
Example code from Section 3.5 of "Introduction to SFC Models Using Python".
Demonstration how variable names are built up.
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Imports
# This next line looks bizarre, but is needed for backwards compatibility with Python 2.7.
from __future__ import print_function
import sfc_models
from sfc_models.models import Model, Country
from sfc_models.sector import Sector
mod = Model()
can = Country(mod, 'CA', 'Canada')
# has_F=False: turns off creation of financial asset variables.
sector_yy = Sector(can, 'YY', has_F=False)
sector_yy.AddVariable('W', 'Variable W <constant>', '4.0')
sector_yy.AddVariable('Y', 'Variable Y - depends on local variable', '2*W')
# Only the next two lines have changed: put sector_xx into another Country
us = Country(mod, 'US')
sector_xx = Sector(us, 'XX', has_F=False)
variable_name = sector_yy.GetVariableName('Y')
print("Name of variable before binding:", variable_name)
# format: inserts variable_name where {0} is
eqn = '{0} + 2.0'.format(variable_name)
sector_xx.AddVariable('X', 'Variable x; depends on other sector', eqn)
# Bind the model; solve
eqns = mod.main()
print(eqns)
print('Variable name after binding:', sector_yy.GetVariableName('Y'))
|
brianr747/SFC_models
|
sfc_models/examples/scripts/intro_3_05_variable_names_2.py
|
Python
|
apache-2.0
| 1,786
|
[
"Brian"
] |
6a060be6970dd34a120254ce0fca6ac73fee8e9abed3513f54109186f53667df
|
#This is the final part of the C4 QA2 worklow (after the last DRM check).
'''
Part 0
Calibration
Part 1
Step 2 (Calibration Weblog Review)
Step 3 (Update tracking systems) -- provides the text to put into systems
Step 4 (Determine if pipeline is appropriate) -- currently no checks? needs to be built out
Step 5 (Run Imaging Pipeline)
Step 6 (Imaging pipeline weblog review)
Step 7 (Prepare SB for QA2 assessment)
Step 8 (QA2 assessment) -- provides README content and the text to put into systems
DRM check
Part 2
Step 9 (QA2 results)
Step 10 (-analysis & tarball creation)
Step 11 (Delivery)
DRM check
Part 3
Step 12:
Moves tarball to Cycle_X Release Directory
Moves -analysis to deliveries directory
Prints delivery (or reingestion) email stamp
Creates calibrated_final.ms, puts it in users workspace and creates P2G stamp
To do: Detect when delivered and remove directory containing pipeline output
'''
# --------------------------------------------------------------------------------------------------
def ReadDataFromWeb(mousid):
# --------------------------------------------------------------------------------------------------
"""
This function reads the metadata about the project for the given MOUS
"""
import urllib2
dataurl="http://www.eso.org/~fstoehr/project_ous_eb_hierarchy.txt"
dataurl2="http://www.eso.org/~fstoehr/ous_eb_qa0status.txt"
#print "Gathering metadata..."
response = urllib2.urlopen(dataurl)
html = response.read().splitlines()
response = None
mousid = str(mousid)
datadict = {}
datadict = {'mous':mousid}
mousid2=mousid.replace("___","://").replace("_","/")
for line in html:
line=line.split()
#print line[0],line[1],line[2],line[3],line[4]
if line[4]==mousid:
#print "found MOUS"
#print line[0]
datadict['code']=line[0]
datadict['sgous']=line[2]
datadict['gous']=line[3]
datadict['mous']= line[4]
if datadict.has_key('sbuids'):
datadict['sbuids'].append(line[9])
else:
datadict['sbuids']=[line[9]]
if datadict.has_key('sbnames'):
datadict['sbnames'].append(line[10])
else:
datadict['sbnames']=[line[10]]
response2 = urllib2.urlopen(dataurl2)
html2 = response2.read().splitlines()
response2 = None
for line2 in html2:
line2=line2.split("|")
if line2[2]=='SemiPass':
continue
if line2[0]==mousid2:
if datadict.has_key('ebuids'):
datadict['ebuids'].append(line2[1])
else:
datadict['ebuids']=[line2[1]]
return datadict
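# For orientation, a successful lookup returns a dict shaped roughly like
# the following (all values below are made-up placeholders):
# {'code': '2016.1.00001.S', 'sgous': '...', 'gous': '...',
# 'mous': 'uid___A001_X000_X00', 'sbuids': ['...'], 'sbnames': ['"SB1"'],
# 'ebuids': ['...']}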
# --------------------------------------------------------------------------------------------------
def DirectoryTree(path):
# --------------------------------------------------------------------------------------------------
'''
This maps the directory tree of the given path
'''
import os
import glob
os.chdir(path)
sous_dir = glob.glob('SOUS*') #figuring out SOUS and cd'ing into it
os.chdir('%s' % sous_dir[0])
gous_dir = glob.glob('GOUS*')
os.chdir('%s' % gous_dir[0])
mous_dir = glob.glob('MOUS*')
os.chdir('%s' % mous_dir[0])
project_sgm_dir = os.getcwd() #storing the whole path for later if ever needed
#continuing down to the HTML area to get the weblog directory
os.chdir('working')
pipeline_runs = sorted(glob.glob('pipeline-*/'), key=os.path.getmtime) #this sorts the glob list from oldest-to-newest
pipeline_dir = pipeline_runs[-1] #-1 selects the newest
os.chdir('%s/html' % pipeline_dir)
project_html_dir = os.getcwd()
return project_sgm_dir, project_html_dir
# --------------------------------------------------------------------------------------------------
def ProprietaryAccess(project_html_dir, project_sgm_dir, datadict, username):
# --------------------------------------------------------------------------------------------------
'''
This takes care of putting the SRDP (calibrated_final.ms) in the proprietary area for the PI to pickup
'''
import os
import glob
import sys
#gathering the PI name by loading that html page: finding the index of the PI and then stripping off the html tags
os.chdir(project_html_dir)
main_page = open(glob.glob('t1-1.html')[0]).readlines()
line_index = 0
for line in main_page:
if 'Principal Investigator' in line:
pi_index = line_index + 1
pi_code = main_page[pi_index].strip().split('>')[1].split('<')[0]
line_index = line_index + 1
#creating a directory for this PI in the area they can download it (if it doesn't already exist)
os.chdir('/lustre/naasc/ALMA_Data_Delivery/proprietary/')
if os.path.isdir('%s' % pi_code) == False:
os.mkdir('%s' % pi_code)
#checking and creating/appending an .htaccess file
os.chdir('%s' % pi_code)
if os.path.isfile('.htaccess') == False:
htaccess = open('.htaccess', 'a')
htaccess.write('AuthType CAS\n')
htaccess.write('Require user %s %s dckim cbrogan aremijan ksharp cubach swood pmurphy teuben\n' % (pi_code, username))
htaccess.write('Order deny,allow\n')
htaccess.write('AuthName \"Authentication Required\"\n')
htaccess.close()
if os.path.isfile('.htaccess') == True:
#if the file exists, see if the current user is already listed for access
htaccess = open('.htaccess').readlines()
current_users = ''
for line in htaccess:
if 'user' in line:
current_users = line
if current_users != '' and username not in current_users:
#the user needs to be added, so write a new file with the name appended once
new_users = current_users.strip()+' %s' % username
new_htaccess = open('.htaccess_new', 'a')
new_htaccess.write('AuthType CAS\n')
new_htaccess.write(new_users+'\n')
new_htaccess.write('Order deny,allow\n')
new_htaccess.write('AuthName \"Authentication Required\"\n')
new_htaccess.close()
#putting the new file in place of the old one
os.system('mv -f .htaccess .htaccess_old')
os.system('mv -f .htaccess_new .htaccess')
#creating a directory in the PI area for this specific MOUS; in the case it does already exist (not sure why) then continue on
os.chdir('/lustre/naasc/ALMA_Data_Delivery/proprietary/%s' % pi_code)
if os.path.isdir('%s' % datadict['mous'].split('_')[-1]) == False:
os.mkdir('%s' % datadict['mous'].split('_')[-1])
os.chdir('%s' % datadict['mous'].split('_')[-1])
mous_dir = datadict['mous'].split('_')[-1]
delivery_path = os.getcwd()
#moving the calibrated_final.ms.tar into this directory
os.chdir('%s/working' % project_sgm_dir)
os.system('mv calibrated_final.ms.tar %s' % delivery_path)
#checking the directory and file are there:
if os.path.exists('/lustre/naasc/web/almadata/proprietary/%s/%s/calibrated_final.ms.tar' % (pi_code, mous_dir)) == False:
raw_input('Error: package not in delivery area')
else:
SRDP_path = '/lustre/naasc/web/almadata/proprietary/%s/%s/calibrated_final.ms.tar' % (pi_code, mous_dir)
return mous_dir, pi_code, SRDP_path
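# For reference, a freshly written .htaccess from the branch above looks
# like this (pi_code/username are placeholders):
# AuthType CAS
# Require user <pi_code> <username> dckim cbrogan aremijan ksharp cubach swood pmurphy teuben
# Order deny,allow
# AuthName "Authentication Required"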
# --------------------------------------------------------------------------------------------------
#MAIN
# --------------------------------------------------------------------------------------------------
import os
import sys
import webbrowser
# ---------------------------
#prerequisites for stuff
username = raw_input('What is your lustre username?:').strip()
mous = raw_input('What is the MOUS ID? (from SCOPS-ticket):').strip()
path = raw_input('Enter path to the pipeline output:').strip()
mous2=mous.replace(':','_').replace('/','_')
datadict = ReadDataFromWeb(mous2)
output = datadict['code']+'.MOUS.'+mous2+'.SBNAME.'+datadict['sbnames'][0].strip('"')+'-analysis'
package_sgm_dir = output+'/sg_ouss_id/group_ouss_id/member_ouss_id'
project_sgm_dir, project_html_dir = DirectoryTree(path)
# ---------------------------
#moving tarball to the cycleX_release
os.chdir('/lustre/naasc/sciops/qa2/%s/Packages' % username)
if datadict['code'].startswith('2016'):
cycle = '4'
os.system('mv %s_%s_001_of_001.tar /lustre/naasc/sciops/cycle4_release' % (datadict['code'], datadict['mous']))
if datadict['code'].startswith('2015'):
cycle = '3'
os.system('mv %s_%s_001_of_001.tar /lustre/naasc/sciops/cycle3_release' % (datadict['code'], datadict['mous']))
# ---------------------------
#moving *-analysis package to deliveries
os.chdir('/lustre/naasc/sciops/qa2/%s' % username)
os.system('mv %s /lustre/naasc/sciops/deliveries' % output)
# ---------------------------
#Print out for helpdesk ticket
print """\n\n\nFor normal delivery:
To: helpdesk-cv@nrao.edu
CC: mlacy, cubach, jmangum, dkunneri
Subject: Cycle %s data for ingestion: %s, MOUS: %s
Content:
Please upload the following file to JAO:
File Path: /lustre/naasc/sciops/cycle%s_release/%s_%s_001_of_001.tar
Project code: %s
GOUS: %s
MOUS: %s
SBName: %s
SBuid: %s
ASDMs: %s
Thanks!\n\n\n""" % (cycle, datadict['code'], datadict['mous'], cycle, datadict['code'], mous2, datadict['code'], datadict['gous'], datadict['mous'], datadict['sbnames'][0].strip('"'), datadict['sbuids'][0].strip('"'), datadict['ebuids'][0].strip('"'))
print """For re-delivery (re-ingestion):
Create an APO ticket: http://jira.alma.cl/projects/APO
Set Issue Type as ARCHIVE
Summary: Cycle %s data for RE-ingestion: %s, %s
Category: Data Project Ingestion
Assignee: Jose Parra
Description: Cycle %s data for ingestion: %s, MOUS: %s
Please upload the following file to JAO:
File Path: /lustre/naasc/sciops/cycle%s_release/%s_%s_001_of_001.tar
Project code: %s
GOUS: %s
MOUS: %s
SBName: %s
SBuid: %s
ASDMs: %s
Thanks!
Additional users to email: alejandro.barrientos@alma.cl, bernardo.malet@alma.cl, cubach@nrao.edu, jotey@nrao.edu, mhatz@nrao.edu, mlacy@nrao.edu, nicolas.gonzalez@alma.cl\n\n\n""" % (cycle, datadict['code'], datadict['mous'], cycle, datadict['code'], datadict['mous'], cycle, datadict['code'], mous2, datadict['code'], datadict['gous'], datadict['mous'], datadict['sbnames'][0].strip('"'), datadict['sbuids'][0].strip('"'), datadict['ebuids'][0].strip('"'))
# ---------------------------
#The final steps: needed webpages are opened for you
mous_dir, pi_code, SRDP_path = ProprietaryAccess(project_html_dir, project_sgm_dir, datadict, username)
raw_input('Once JAO ingests the package into the archive an automated delivery email is sent to the PI and to data_delivery@alma.cl. Press enter when notification email is received to start the final process:\n\n\n')
print('Loading webpages...')
webbrowser.open('http://help.almascience.org')
webbrowser.open('http://rcmail.cv.nrao.edu')
print """
Dear PI,
You should have recently received an email announcing that data for member ObsUnitSet %s of your project %s are now available for download through the ALMA Science Portal Request Handler.
For your convenience, a fully-calibrated MS is also available for download through the NAASC Web server. Unlike the data obtained through the Request Handler, these calibrated data will only be available for the next 30 days. A concatenated measurement set containing only target data is available as calibrated_final.ms. Your data may be found here:
https://bulk.cv.nrao.edu/almadata/proprietary/%s/%s/calibrated_final.ms.tar
In addition, we would like to recommend to you the services that NRAO can provide to assist you in the analysis of your data. We welcome visits from PIs who would like to re-reduce their data from the original raw files, and from PIs who would like advice on how to further interpret and display the images they have received. Financial support for travel costs is available for PIs based in the US. Even if you don't feel you need to visit but have a few questions, we have scientists available who can talk with you by phone or video connection, or respond to email and Helpdesk queries.
With kind regards,
The North American ALMA Archive at the NAASC\n\n\n""" % (mous, datadict['code'], pi_code, mous_dir)
raw_input('1.) Post the previous stamp to the P2G Helpdesk Ticket and email the stamp to teuben@astro.umd.edu. Enter to continue\n')
webbrowser.open('https://asa.alma.cl/protrack/')
raw_input('2.) Update Project Tracker: change MOUS state \"Delivered\". Enter to continue\n')
webbrowser.open('https://webtest2.cv.nrao.edu/php/pfisher/drspreadCycle%s.php' % cycle)
raw_input('3.) Update DrSpreadSheet: set Delivery Date and QA2 status. Enter to continue\n')
raw_input('4.) Press Enter to move pipeline output directory to deliveries')
os.system('mv %s /lustre/naasc/sciops/deliveries/' % path)
raw_input('5.) Press Enter for the final SCOPS stamp and you\'re finished.')
print '''<DRM>, for %s,
1.) the delivery letter to the PI has been sent
2.) the project tracker has been updated
3.) the DRspreadsheet has been updated
4.) the dataset has been moved to /lustre/naasc/sciops/deliveries
''' % datadict['sbnames'][0]
#For bookkeeping purposes, please record the delivery date and QA2 status in the following ways:
#Cycle 4: Open DrSpreadSheet and update the QA2 Status and Date Delivered columns.
#For Cycle 3: Open DrSpreadSheet and update the QA2 Status and Date Delivered columns.
#For Cycle 2: Open ALMA Cycle 2 Reduction Status - NA, go to the SB View tab and update the QA2 Status and Date of PI Letter columns.
#For Cycle 1: Open ALMA Cycle 1 reduction status - NA, go to the EB View tab and update the QA2 Status column. Then, go to the SB View tab and update the QA2 Status and Date of PI Letter columns.
|
bmarshallk/NAASC
|
cycle_scripts/C4Part3.py
|
Python
|
gpl-3.0
| 13,577
|
[
"VisIt"
] |
76b18f9e7efe598eada8bd95d4fb336804896af72af180f1a7ad0e758441c2b5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lulesh(MakefilePackage):
"""LULESH is a highly simplified application, hard-coded to only
style typical in scientific C or C++ based applications. Hard
code to only solve a Sedov blast problem with analytic answer
"""
tags = ['proxy-app']
homepage = "https://computing.llnl.gov/projects/co-design/lulesh"
git = "https://github.com/LLNL/LULESH.git"
version('2.0.3', tag='2.0.3')
variant('mpi', default=True, description='Build with MPI support')
variant('openmp', default=True, description='Build with OpenMP support')
variant('visual', default=False,
description='Build with Visualization support (Silo, hdf5)')
depends_on('mpi', when='+mpi')
depends_on('silo', when='+visual')
depends_on('hdf5', when='+visual')
@property
def build_targets(self):
targets = []
cxxflag = ' -g -O3 -I. '
ldflags = ' -g -O3 '
if '~mpi' in self.spec:
targets.append('CXX = {0} {1}'.format(spack_cxx, ' -DUSE_MPI=0 '))
else:
targets.append(
'CXX = {0} {1}'.format(self.spec['mpi'].mpicxx,
' -DUSE_MPI=1'))
targets.append(
'MPI_INC = {0}'.format(self.spec['mpi'].prefix.include))
targets.append('MPI_LIB = {0}'.format(self.spec['mpi'].prefix.lib))
if '+visual' in self.spec:
targets.append(
'SILO_INCDIR = {0}'.format(self.spec['silo'].prefix.include))
targets.append(
'SILO_LIBDIR = {0}'.format(self.spec['silo'].prefix.lib))
cxxflag = ' -g -DVIZ_MESH -I${SILO_INCDIR} '
ldflags = ' -g -L${SILO_LIBDIR} -Wl,-rpath -Wl,'
ldflags += '${SILO_LIBDIR} -lsiloh5 -lhdf5 '
if '+openmp' in self.spec:
cxxflag += self.compiler.openmp_flag
ldflags += self.compiler.openmp_flag
targets.append('CXXFLAGS = {0}'.format(cxxflag))
targets.append('LDFLAGS = {0}'.format(ldflags))
return targets
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('lulesh{0}'.format(self.version.up_to(2)), prefix.bin)
mkdirp(prefix.doc)
install('README', prefix.doc)
install('TODO', prefix.doc)
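# Typical usage (a sketch; spec syntax follows the variants declared above):
# spack install lulesh              # +mpi +openmp are the defaults
# spack install lulesh~mpi~openmp   # serial build
# spack install lulesh+visual       # adds Silo/HDF5 visualization support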
|
iulian787/spack
|
var/spack/repos/builtin/packages/lulesh/package.py
|
Python
|
lgpl-2.1
| 2,523
|
[
"BLAST"
] |
3a63d978b4e5bac2cc140acea5a128ff2742989e7d164f423f5e3032910d4bf6
|
import os
from optparse import make_option
from Bio.Blast import NCBIXML
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from tasm.models import Assembly, Transcript, Locus, RefSeq, BlastHit, BASE_REFSEQ_URL
class Command(BaseCommand):
'''
Imports results of the local blast run on assembly's transcripts.fa
file.
'''
option_list = BaseCommand.option_list + (
make_option('--asm', default='', dest='asm',
help='Assembly'),
make_option('--expect', default='1e-4', dest='expect',
help='Expect value cutoff'),
make_option('--max_hits', default='1', dest='max_hits',
help='Maximum number of hits to import per transcript'),
)
args = '<blastout.xml>'
refseqs = []
blasthits = []
def set_options(self, **options):
'''
Set instance variables based on options dict
'''
try:
self.max_hits = int(options['max_hits'])
except ValueError:
raise CommandError('max_hits must be integer.')
try:
self.expect = float(options['expect'])
except ValueError:
raise CommandError('Expect value must be float.')
try:
self.asm = Assembly.objects.get(identifier=options['asm'])
except ObjectDoesNotExist:
raise CommandError('Unknown assembly: {asm}.'.format(asm=options['asm']))
def _import_blast_record(self, record):
'''
Handles import of a single BLAST record. Adds records to
self.refseqs and self.blasthits to be used in bulk_create
later.
RefSeq instances are created as needed.
BlastHit instances are created in bulk in self.handle()
'''
if not record.alignments:
return
query_bits = record.query.split('_')
locus = Locus.objects.get(locus_id=int(query_bits[1]), assembly=self.asm)
transcript = Transcript.objects.get(
locus=locus,
transcript_id=int(query_bits[3].split('/')[0]))
i = 0
while i < min(self.max_hits, len(record.alignments)):
# FIXME: This is slow because it hits the database (almost)
# every iteration. Needs to be improved, not clear how.
aln = record.alignments[i]
refseq, created = RefSeq.objects.get_or_create(accession=aln.accession,
defaults={
'definition': aln.hit_def,
'length': aln.length,
'url': BASE_REFSEQ_URL + aln.accession
})
self.refseqs.append(refseq)
best_hsp = aln.hsps[0]
self.blasthits.append(BlastHit(
transcript=transcript,
refseq=refseq,
align_length=best_hsp.align_length,
identities=best_hsp.identities,
expect=best_hsp.expect,
score=best_hsp.score
))
i += 1
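# One way to relieve the per-alignment database hit flagged in the FIXME
# above would be to prefetch known accessions once per run, e.g.
# accession_cache = {r.accession: r for r in RefSeq.objects.all()}
# and consult that dict before falling back to get_or_create(). This is
# only a sketch and is not wired into the command.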
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('Invalid number of arguments.')
blast_file = args[0]
if not os.path.exists(blast_file):
raise CommandError('File {file} not found.'.format(file=blast_file))
self.set_options(**options)
self.stdout.write('Importing BLAST results for assembly %s ...' % self.asm)
with open(blast_file, 'rU') as fi:
for rec in NCBIXML.parse(fi):
self._import_blast_record(rec)
self.stdout.write('Accepted {hits} hits for {seqs} reference sequences.'.format(
hits=len(self.blasthits),
seqs=len(self.refseqs)
))
self.stdout.write('Importing BLAST hits ...')
BlastHit.objects.bulk_create(self.blasthits)
self.stdout.write('DONE.')
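# Example invocation (a sketch; names depend on your project setup):
# python manage.py import_blast --asm=my_assembly --expect=1e-4 --max_hits=1 blastout.xml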
|
eco32i/tweed
|
tasm/management/commands/import_blast.py
|
Python
|
bsd-3-clause
| 3,872
|
[
"BLAST"
] |
b570646f2614c0e97003ca7060a701457876bff15a72dbdd7272387a2ac4ace5
|
# - Coding UTF8 -
#
# Networked Decision Making
# Development Sites (source code):
# http://code.google.com/p/global-decision-making-system/
# http://github.com/NewGlobalStrategy/NetDecisionMaking
#
# Demo Sites (Google App Engine)
# http://netdecisionmaking.appspot.com
# http://globaldecisionmaking.appspot.com
#
# License Code: MIT
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King (newglobalstrategy@gmail.com)
# Russ also blogs occasionally to pass the time at:
# http://proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
# With thanks to Guido, Massimo and many other that make this sort of thing
# much easier than it used to be
# This controller provides details about network decision making
# access to the FAQ and allows generation of a general message
# on what we are looking to achieve
# The Press Release Note for the latest version is also now included
# and some basic capabilities to download actions have also been added
def index():
return dict(message="all done in the view")
def privacy():
return dict(message="all done in the view")
def faq():
return dict(message="all done in the view")
def pr():
return dict(message="all done in the view")
def enhance():
return dict(message="all done in the view")
def stdmsg():
messagerow = db(db.message.msgtype == 'std').select(
db.message.message_text).first()
if messagerow is None:
message = 'You have not setup any std messages yet'
else:
message = messagerow.message_text
return dict(message=message)
def download():
downloads = db().select(db.download.ALL, orderby=db.download.title, cache=(cache.ram, 1200), cacheable=True)
return dict(downloads=downloads)
def getfile():
return response.download(request, db)
|
NewGlobalStrategy/NetDecisionMaking
|
controllers/about.py
|
Python
|
mit
| 2,064
|
[
"VisIt"
] |
41d731f394370d56c7e931d79fc8b1f37fa68a45da96c9590a09ff222fceb429
|
"""
Created by: Bryce Chung (neuRowsATL)
Last Modified: May 20, 2016 (version 3)
Description: This class opens and saves AnimatLab models from .aproj files.
Modified August 23 2017 Daniel Cattaert
ActualizeAproj now includes StartTime and EndTime of externalStimuli
modified September 19, 2017 (D. Cattaert, C. Halgand):
procedure saveXML modified to avoid problems with "." in path
if overwrite:
if fileName == '':
fileName = self.asimFile
else:
fileName = os.path.join(os.path.dirname(self.asimFile),
os.path.splitext(fileName)[0]+'.asim')
else:
saveDir = os.path.dirname(self.asimFile)
rootName = os.path.basename(os.path.splitext(self.asimFile)[0])
oldname = rootName + '*.asim'
ix = len(glob.glob(os.path.join(saveDir, oldname)))
newname = rootName + '-%i.asim' % ix
fileName = os.path.join(saveDir, newname)
procedure saveXMLaproj changed in the same way
modified November 09, 2017 (D. Cattaert):
class AnimatLabSimFile has been completed to include motor elements
for getElementByType("MotorPosition") and getElementByType("MotorVelocity")
These facilities are used in "optimization.py" in affichMotor() function
"""
# Import dependencies
import os
import glob
import numpy as np
from FoldersArm import FolderOrg
import xml.etree.ElementTree as elementTree
global verbose
verbose = 0
# # ===== ===== ===== ===== =====
# # ===== ===== ===== ===== =====
class AnimatLabModelError(Exception):
"""
This class manages errors thrown by the AnimatLabModel class.
Right now, this class does nothing other than print an error message.
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
def __init__(self, value):
"""
__init__(value)
Set the value of the error message.
"""
self.value = value
def __str__(self):
"""
__str__()
Returns the error message.
"""
return repr(self.value)
# # ===== ===== ===== ===== =====
# # ===== ===== ===== ===== =====
class AnimatLabModel(object):
"""
AnimatLabModel(folder='', asimFile='')
API class that uploads, saves, and manages an AnimatLab simulation file.
Using this class, you can load a simulation file and view its parameters,
change its parameters, or use it to generate and save a new simulation file
with different parameters.
folder Specifies folder for AnimatLab project files
asimFile Specifies .asim file for AnimatLab model
get_aproj() Get .aproj file path string
get_asim() Get .asim file path string
get_xmlRoot() Get root XML element from parsed .asim file
set_aproj(filePath) Set path string for .aproj file
set_asim(filePath) Set path string for .asim file
getElementByType(elType) Get element(s) by AnimatLab type
getElementByName(elName) Get element(s) by AnimatLab name
getElementByID(elID) Get element by AnimatLab ID
"""
def __init__(self, folder='', asimFile=''):
"""
__init__(folder='', asimFile='')
Initializes an AnimatLabModel class object.
folder Specifies full folder path for AnimatLab project files
asimFile Specifies full .asim file path for AnimatLab model
If no folder is specified, the object will default to the current
working directory.
If no asimFile is specified, the object will search for an .asim file
in the model folder with the character string, "_Standalone". If no
file exists as "*_Standalone.asim" then the object will look for any
file with the .asim extension.
Last updated: December 28, 2015
Modified by: Bryce Chung <bchung4@student.gsu.edu>
"""
# # Set root folder for AnimatLab model resource files
if folder == '':
self.projectFolder = os.getcwd()
else:
self.projectFolder = folder
try:
# # Check for AnimatLab project file
aprojFile = glob.glob(os.path.join(self.projectFolder, '*.aproj'))
if len(aprojFile) == 0:
error = "No AnimatLab project file exists with extension " \
"*.aproj in folder: %s" +\
" Check AnimatLab project folder for consistency."
raise AnimatLabModelError(error % self.projectFolder)
elif len(aprojFile) > 1:
error = "Multiple AnimatLab aproj files exist with extension" \
" *.aproj in folder: %s" +\
" Check AnimatLab project folder for consistency."
raise AnimatLabModelError(error % self.projectFolder)
self.aprojFile = aprojFile[0]
aprojFile = os.path.split(self.aprojFile)[-1]
projectFileName = os.path.splitext(aprojFile)[0]
self.aprojFile = os.path.join(self.projectFolder, aprojFile)
if asimFile != '':
# # Check to see if AnimatLab asimfile exists if specified
if os.path.isfile(os.path.join(self.projectFolder, asimFile)):
self.asimFile = os.path.join(self.projectFolder, asimFile)
else:
error = "Specified AnimatLab simulation file does not " \
"exist: %s"
raise AnimatLabModelError(error % os.path.join(
self.projectFolder, asimFile))
else:
# # Try to find default AnimatLab simulation files...
if os.path.isfile(
os.path.join(self.projectFolder,
projectFileName+'_Standalone.asim')):
self.asimFile = os.path.join(
self.projectFolder, projectFileName+'_Standalone.asim')
elif len(glob.glob(os.path.join(
self.projectFolder, '*.asim'))) == 1:
self.asimFile = glob.glob(os.path.join(self.projectFolder, '*.asim'))[0]
elif len(glob.glob(os.path.join(
self.projectFolder, '*.asim'))) == 0:
error = "No standalone simulation file exists with " \
"extension *.asim in folder: %s" \
" Generate a standalone simulation file from " \
"the AnimatLab GUI"
txt = error % self.projectFolder
raise AnimatLabModelError(txt)
else:
error = "Multiple simulation files exist with extension "\
"*.asim in folder %s" +\
" Delete duplicates and leave one file or "\
"initiate AnimatLabModel object with ASIM file "\
"specified"
raise AnimatLabModelError(error % self.projectFolder)
except AnimatLabModelError as e:
print "Error initializing AnimatLab model object:\n\n %s" % e.value
raise
if verbose > 0:
print "\nUsing AnimatLab Project File:\n%s" % self.aprojFile
print "\nUsing AnimatLab Simulation File:\n%s" % self.asimFile
# Set up lookup table for asim model elements
self.tree = elementTree.parse(self.asimFile)
root = self.tree.getroot()
lookupType = []
lookupID = []
lookupName = []
lookupElement = []
def lookupAppend(el, elType):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
lookupName.append(el.find("Name").text)
lookupElement.append(el)
def lookupAppend2(el, elType):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
s = (el.find("SourceID").text)
t = (el.find("TargetID").text)
sm = np.array(lookupElement)[np.where(np.array(lookupID) == s)[0]]
if len(sm) == 1:
sname = sm[0].find("Name").text
tm = np.array(lookupElement)[np.where(np.array(lookupID) == t)[0]]
if len(tm) == 1:
tname = tm[0].find("Name").text
# lookupName.append(s + "*" + t)
lookupName.append(sname + "*" + tname)
lookupElement.append(el)
def lookupAppend3(el, elType):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
lookupName.append(el.find("ColumnName").text)
lookupElement.append(el)
def lookupAppend4(el, elType, NeuID):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
s = (el.find("FromID").text)
t = NeuID
sm = np.array(lookupElement)[np.where(np.array(lookupID) == s)[0]]
if len(sm) == 1:
sname = sm[0].find("Name").text
tm = np.array(lookupElement)[np.where(np.array(lookupID) == t)[0]]
if len(tm) == 1:
tname = tm[0].find("Name").text
# lookupName.append(s + "*" + t)
lookupName.append(sname + "*" + tname)
lookupElement.append(el)
######################################################################
"""
modified August 30, 2017 (D. Cattaert) to handle Joints parameters
"""
def analyzeChilbodies(rb, level):
txt = ""
rbfound = 0
chrblist = []
for n in range(level):
txt += "\t"
# print txt, level, rb, rb.find("Name").text
el = rb.find("Joint")
if el is not None:
print txt + el.find("Name").text
lookupAppend(el, "Joint")
elt = rb.find("ChildBodies")
if elt is not None:
rbfound = 1
chrblist = list(elt)
# if rbfound == 0:
# print txt + "No childbodies"
# if rbfound == 1:
# print txt + "childbodies found",
# print txt, level, chrblist
return [rbfound, chrblist]
print "\nREADING .asim elements..."
"""
modified August 30, 2017 (D. Cattaert) to handle Joints parameters
"""
level = 0
rbfound = 0
subfound = 0
childRbNb = 0
nbsub = 1 # the firt list of rigid bodies
subchrblist = []
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for rigidbodyelmt in list(organism.find("RigidBody")):
# print rigidbodyelmt
if list(rigidbodyelmt) != []:
# print list(rigidbodyelmt)
subfound = 1
rbeltlist = list(rigidbodyelmt)
subchrblist.append(rbeltlist)
childRbNb = 0
# number of child RigidBodies
while subfound:
for ch in range(nbsub):
childRbNb = 0
subfound = 0 # flag to indicate a child rb exists
# first looks for all childbodies from same parent
for rb in subchrblist[level+ch]:
[rbfound, chrblist] = analyzeChilbodies(rb,
level)
if rbfound:
childRbNb += 1
subfound = 1
# each time childbodies are found, the list
# is added to the subchrblist
subchrblist.append(chrblist)
nbsub = childRbNb
# ... continues the analysis of the parent
if subfound: # once the parent has been scaned,
level += 1 # and childbodies found, each child
# becomes parent: the process starts again
######################################################################
for el in list(root.find("ExternalStimuli")):
if el.find("Type").text == "MotorPosition":
lookupAppend(el, "MotorPosition")
for el in list(root.find("ExternalStimuli")):
if el.find("Type").text == "MotorVelocity":
lookupAppend(el, "MotorVelocity")
for el in list(root.find("ExternalStimuli")):
if el.find("Type").text == "Current":
lookupAppend(el, "ExternalStimuli")
# path = "Environment/Organisms/Organism/NervousSystem/NeuralModules"
# modules = root.find(path).getchildren()
# for module in modules:
"""
# modified by Daniel Cattaert May 2016
in order to allow this module to work when a second organism is added
the three lines above have been replaced in the three next lines
"""
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
# print list(ns)
for mod in ns:
if mod.find("ModuleName").text == 'IntegrateFireSim':
for el in list(mod.find("Neurons")):
lookupAppend(el, "Neurons")
elif mod.find("ModuleName").text == 'PhysicsModule':
for el in list(mod.find("Adapters")):
lookupAppend(el, "Adapters")
# path = "Environment/Organisms/Organism/NervousSystem/NeuralModules" \
# + "/NeuralModule/Synapses/SpikingSynapses"
# modules = root.find(path).getchildren()
# for el in modules:
# lookupAppend(el, "SpikingSynapses")
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
# print list(ns)
for mod in ns:
if mod.find("ModuleName").text == 'IntegrateFireSim':
for syn in list(mod.find("Synapses")):
# print syn
for el in syn:
lookupAppend(el, "Synapses")
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
# print list(ns)
for mod in ns:
if mod.find("ModuleName").text == 'IntegrateFireSim':
for el in list(mod.find("Connexions")):
lookupAppend2(el, "Connexions")
for el in list(root.find("DataCharts")):
lookupAppend(el, "Chart")
"""
path = "DataCharts/DataChart/DataColumns"
modules = list(root.find(path))
for el in modules:
lookupAppend3(el, "ChartcolName")
"""
ch = 0
for module in list(root.find("DataCharts")):
print module.find("Name").text
for el in list(module.find("DataColumns")):
typ = "ChartCol" + str(ch)
lookupAppend3(el, typ)
ch += 1
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
# print list(ns)
for module in ns:
if module.find("ModuleName").text == 'FiringRateSim':
for el in list(module.find("Neurons")):
lookupAppend(el, "NeuronsFR")
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
# print list(ns)
for module in ns:
if module.find("ModuleName").text == 'FiringRateSim':
for el in list(module.find("Neurons")):
NeurID = el.find("ID").text
for syn in list(el.find("Synapses")):
lookupAppend4(syn, "SynapsesFR", NeurID)
self.lookup = {}
self.lookup["Type"] = lookupType
self.lookup["ID"] = lookupID
self.lookup["Name"] = lookupName
self.lookup["Element"] = lookupElement
#
# ===============================================
# Set up lookup table for aproj model elements
# ===============================================
self.aprojtree = elementTree.parse(self.aprojFile)
aprojroot = self.aprojtree.getroot()
aprojlookupType = []
aprojlookupID = []
aprojlookupName = []
aprojlookupElement = []
def aprojlookupAppend(el, elType):
aprojlookupType.append(elType)
aprojlookupID.append(el.find("ID").text)
aprojlookupName.append(el.find("Name").text)
aprojlookupElement.append(el)
def aprojlookupAppendNode(el, elType):
aprojlookupType.append(elType)
aprojlookupID.append(el.find("ID").text)
aprojlookupName.append(el.find("Text").text)
aprojlookupElement.append(el)
def aprojlookupAppend2(el, elType):
aprojlookupType.append(elType)
aprojlookupID.append(el.find("ID").text)
s = (el.find("OriginID").text)
t = (el.find("DestinationID").text)
sm = np.array(aprojlookupElement
)[np.where(np.array(aprojlookupID) == s)[0]]
if len(sm) == 1:
sname = sm[0].find("Text").text
tm = np.array(aprojlookupElement
)[np.where(np.array(aprojlookupID) == t)[0]]
if len(tm) == 1:
tname = tm[0].find("Text").text
aprojlookupName.append(sname + "*" + tname)
# print sname + "*" + tname,
aprojlookupElement.append(el)
def aprojlookupAppend3(el, elType):
aprojlookupType.append(elType)
aprojlookupID.append(el.find("ID").text)
aprojlookupName.append(el.find("ColumnName").text)
aprojlookupElement.append(el)
def aprojlookupAppend4(el, elType):
aprojlookupType.append(elType)
aprojlookupID.append(el.find("ID").text)
s = (el.find("OriginID").text)
t = (el.find("DestinationID").text)
sm = np.array(aprojlookupElement
)[np.where(np.array(aprojlookupID) == s)[0]]
if len(sm) == 1:
sname = sm[0].find("Text").text
tm = np.array(aprojlookupElement
)[np.where(np.array(aprojlookupID) == t)[0]]
if len(tm) == 1:
tname = tm[0].find("Text").text
aprojlookupName.append(sname + "*" + tname)
# print sname + "*" + tname,
aprojlookupElement.append(el)
print "\nREADING .aproj elements..."
# print "Stimuli"
path = "Simulation/Stimuli"
for el in list(aprojroot.find(path)):
# print el.find("Name").text,
aprojlookupAppend(el, "Stimulus")
# print
# print "Neurons"
path = "Simulation/Environment/Organisms"
organisms = list(aprojroot.find(path))
for organism in organisms:
ns = organism.find("NervousSystem/Node")
if ns is None:
print "No Neuron"
elif ns is not None:
nodes = ns.find("Nodes")
# print nodes, list(nodes)
for el in list(nodes):
cn = el.find("ClassName").text
if cn.split('.')[0] == "IntegrateFireGUI":
# print el.find("Text").text,
aprojlookupAppendNode(el, "Neurons")
# print
# print "Adapters"
path = "Simulation/Environment/Organisms"
organisms = list(aprojroot.find(path))
for organism in organisms:
ns = organism.find("NervousSystem/Node")
if ns is None:
print "No Neuron"
elif ns is not None:
nodes = ns.find("Nodes")
# print nodes, list(nodes)
for el in list(nodes):
cn = el.find("ClassName").text
if cn.split('.')[-1] == "NodeToPhysicalAdapter":
# print el.find("Text").text,
aprojlookupAppendNode(el, "Adapters")
elif cn.split('.')[-1] == "PhysicalToNodeAdapter":
# print el.find("Text").text,
aprojlookupAppendNode(el, "Adapters")
# print
# print "Symapses types"
path = "Simulation/Environment/Organisms"
organisms = list(aprojroot.find(path))
for organism in organisms:
ns = organism.find("NervousSystem/NeuralModules")
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
for node in ns:
syty = node.find("SynapseTypes")
if syty is not None:
# print syty
li = list(syty)
for el in li:
# print el.find("Name").text,
aprojlookupAppend(el, "Synapses")
# print
# print "Connexions (Links)"
path = "Simulation/Environment/Organisms"
organisms = list(aprojroot.find(path))
for organism in organisms:
ns = organism.find("NervousSystem/Node")
if ns is None:
print "No Neuron"
elif ns is not None:
links = ns.find("Links")
for el in list(links):
cn = el.find("ClassName").text
if cn.split('.')[0] == "IntegrateFireGUI":
aprojlookupAppend2(el, "Connexions")
# print
# print "NeuronsFR"
path = "Simulation/Environment/Organisms"
organisms = list(aprojroot.find(path))
for organism in organisms:
ns = organism.find("NervousSystem/Node")
if ns is None:
print "No Neuron"
elif ns is not None:
nodes = ns.find("Nodes")
for el in list(nodes):
cn = el.find("ClassName").text
if cn.split('.')[0] == "FiringRateGUI":
# print el.find("Text").text,
aprojlookupAppendNode(el, "NeuronsFR")
# print
# print "ConnexionsFR (Links)"
path = "Simulation/Environment/Organisms"
organisms = list(aprojroot.find(path))
for organism in organisms:
ns = organism.find("NervousSystem/Node")
if list(ns) == []:
print "No Neuron"
elif list(ns) != []:
for el in list(ns.find('Links')):
# print el
cn = el.find("ClassName").text
# print cn
if cn.split('.')[-1] == "Normal":
aprojlookupAppend4(el, "SynapsesFR")
self.aprojlookup = {}
self.aprojlookup["Type"] = aprojlookupType
self.aprojlookup["ID"] = aprojlookupID
self.aprojlookup["Name"] = aprojlookupName
self.aprojlookup["Element"] = aprojlookupElement
# GET functions
def get_aproj(self):
"""
get_aproj()
Returns a character string of the .aproj file.
Last updated: December 28, 2015
Modified by: Bryce Chung <bchung4@student.gsu.edu>
"""
return self.aprojFile
def get_asim(self):
"""
get_asim()
Returns a character string of the .asim file.
Last updated: December 28, 2015
Modified by: Bryce Chung <bchung4@student.gsu.edu>
"""
return self.asimFile
def get_xml(self):
"""
get_xml()
Returns an XML root element for the XML tree.
Last updated: December 28, 2015
Modified by: Bryce Chung <bchung4@student.gsu.edu>
"""
try:
return self.tree.getroot()
except:
raise KeyError("XML tree element not initiated")
# SET functions
def set_aproj(self, filePath):
"""
set_aproj(filePath)
Sets the full file path string for the .aproj file.
Last updated: December 28, 2015
Modified by: Bryce Chung <bchung4@student.gsu.edu>
"""
self.aprojFile = filePath
def set_asim(self, filePath):
"""
set_asim(filePath)
Sets the full file path string for the .asim file.
Last updated: December 28, 2015
Modified by: Bryce Chung <bchung4@student.gsu.edu>
"""
if os.path.isfile(filePath):
self.asimFile = filePath
self.tree = elementTree.parse(self.asimFile)
root = self.tree.getroot()
else:
warning = "No ASIM file specified for AnimatLab Model object."
raise AnimatLabModelError(warning)
def getElementByType(self, elType):
"""
getElementByType(elType)
Returns an array of XML elements with the type, elType
elType Options: "Neurons", "ExternalStimuli", "Adapters"
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
return np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["Type"]) == elType)[0]]
def getElementByType2(self, elType):
"""
getElementByType(elType)
Returns an array of XML elements with the type, elType
elType Options: "Neurons", "ExternalStimuli", "Adapters"
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
return np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["Type"]) == elType)[0]]
def getElementByName(self, elName):
"""
getElementByName(elName)
Returns an XML element with the specified name, elName
elName AnimatLab name of the desired element
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
matches = np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["Name"]
) == elName)[0]]
if len(matches) > 1:
warning = "WARNING: More than one element with name found!!\n\n \
%i instance(s) with name %s"
print warning % (len(matches), elName)
return matches
elif len(matches) == 1:
return matches[0]
else:
warning = "WARNING: No matches found for elements with name:\n%s"
print warning % elName
return None
def getElementByID(self, elID):
"""
getElementByID(elID)
Returns an XML element by the AnimatLab ID
elID Specifies the AnimatLab ID of the desired element
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
matches = np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["ID"]) == elID)[0]]
if len(matches) > 1:
warning = "WARNING: More than one element with ID found!!\n\n \
%i instance(s) with ID %s"
print warning % (len(matches), elID)
return matches
elif len(matches) == 1:
return matches[0]
else:
print "WARNING: No matches found for elements with ID:\n%s" % elID
return None
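# Typical lookup usage (a sketch; folder and element names are placeholders):
# model = AnimatLabModel(folder='/path/to/AnimatLab/project')
# el = model.getElementByName('MN1')
# if el is not None:
#     print el.find("Name").text, el.find("ID").text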
def saveXML(self, fileName='', overwrite=False):
"""
saveXML(fileName='', overwrite=False)
Saves the current AnimatLabModel object as a .asim file
with the path name, fileName.
fileName Specifies the name of the .asim file.
overwrite Boolean flag to overwrite an existing .asim file.
The default file path is the project folder of
the AnimatLabModel instantiation.
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
if overwrite:
if fileName == '':
fileName = self.asimFile
else:
fileName = os.path.join(os.path.dirname(self.asimFile),
os.path.splitext(fileName)[0]+'.asim')
else:
saveDir = os.path.dirname(self.asimFile)
rootName = os.path.basename(os.path.splitext(self.asimFile)[0])
oldname = rootName + '*.asim'
ix = len(glob.glob(os.path.join(saveDir, oldname)))
newname = rootName + '-%i.asim' % ix
fileName = os.path.join(saveDir, newname)
"""
print "----------------------------------"
print 'asimFile', self.asimFile
print 'fileName', fileName
print 'overwrite', overwrite
if fileName == '':
if overwrite:
fileName = self.asimFile
print '--fileName', fileName
else:
saveDir = os.path.split(self.asimFile)[0]
rootName = os.path.split(self.asimFile)[-1].split('.')[0]
oldname = rootName + '*.asim'
ix = len(glob.glob(os.path.join(saveDir, oldname)))
newname = rootName + '-%i.asim' % ix
fileName = os.path.join(saveDir, newname)
print 'saveDir', saveDir
print 'rootName', rootName
print 'oldName', oldname
print 'newName', newname
print 'fileName', fileName
else:
if overwrite:
fileName = os.path.join(os.path.split(self.asimFile)[0],
fileName.split('.')[0]+'.asim')
else:
saveDir = os.path.split(self.asimFile)[0]
rootName = os.path.split(self.asimFile)[-1].split('.')[0]
oldname = rootName + '*.asim'
ix = len(glob.glob(os.path.join(saveDir, oldname)))
newname = rootName + '-%i.asim' % ix
fileName = os.path.join(saveDir, newname)
ix = len(glob.glob(
os.path.join(os.path.split(self.asimFile)[0],
fileName.split('.')[0]+'*.asim')))
fileName = os.path.join(os.path.split(self.asimFile)[0],
fileName.split('.')[0]+'-%i.asim' % ix)
"""
print 'Saving file: %s' % fileName
self.tree.write(fileName)
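# Usage sketch (hypothetical file names; assumes `model` is an instance whose
# .asim file has been loaded):
#     model.saveXML()                        # saves as "<root>-<n>.asim" next to the original
#     model.saveXML(overwrite=True)          # overwrites the loaded .asim file
#     model.saveXML("test", overwrite=True)  # saves as "test.asim" in the same folder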
def getElementByNameAproj(self, elName):
"""
getElementByNameAproj(elName)
Returns an XML element with the specified name, elName
elName AnimatLab name of the desired element
Last updated: February 20, 2017
Modified by: Daniel Cattaert
"""
matches = np.array(self.aprojlookup["Element"]
)[np.where(np.array(self.aprojlookup["Name"]
) == elName)[0]]
if len(matches) > 1:
warning = "WARNING: More than one element with name found!!\n\n \
%i instance(s) with name %s"
print warning % (len(matches), elName)
return matches
elif len(matches) == 1:
return matches[0]
else:
warning = "WARNING: No matches found for elements with name:\n%s"
print warning % elName
return None
def changeMeshPath(self, newMeshPath):
sp = ""
def changeDir(oldDir, newMeshPath, meshDir):
newPath = newMeshPath[:newMeshPath.find(meshDir)] + \
oldDir[oldDir.find(meshDir):]
return newPath
def findmesh(branch, sp):
for elt in branch:
print sp + "elt", elt
try:
meshpath = elt.find("MeshFile").text
print sp + meshpath
new = changeDir(meshpath, newMeshPath, "MaleSkeleton")
elt.find("MeshFile").text = new
print sp + new
except:
pass
try:
cb = list(elt.find("ChildBodies"))
print sp + "childbodies found"
sp = sp + "\t"
findmesh(cb, sp)
except:
pass
# self.tree = elementTree.parse(self.asimFile)
self.aprojtree = elementTree.parse(self.aprojFile)
root = self.aprojtree.getroot()
path = "Simulation/Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
print organism.find("Name").text
findmesh(organism, sp)
# self.tree = elementTree.parse(self.asimFile) # return to asimfile
# root = self.tree.getroot()
def asimtoaproj(self, el, ptVar, pts, simpar, affiche=0):
# reads Animatlab Aproj file param value and scale
va = el.get("Value")
sc = el.get("Scale")
ac = el.get("Actual")
if simpar == "Value":
txt1 = str(ptVar)
for k in range(4-(len(txt1)/8)):
txt1 += "\t"
txt2 = "= " + str(va)
for k in range(2-(len(txt2)/8)):
txt2 += "\t"
if affiche:
print txt1 + txt2 + ">>\t" + str(pts[ptVar])
# Update the AnimatLab element value
newValue = pts[ptVar]
if sc == 'nano':
newActual = newValue * 1e-09
elif sc == 'micro':
newActual = newValue * 1e-06
elif sc == 'milli':
newActual = newValue * 1e-03
elif sc == 'None':
newActual = newValue
else:
txt1 = str(ptVar)
for k in range(4-(len(txt1)/8)):
txt1 += "\t"
txt2 = "= " + str(ac)
for k in range(2-(len(txt2)/8)):
txt2 += "\t"
if affiche:
print txt1 + txt2 + ">>\t" + str(pts[ptVar])
# Update the AnimatLab element value
newActual = pts[ptVar]
if sc == 'nano':
newValue = newActual / 1e-09
elif sc == 'micro':
newValue = newActual / 1e-06
elif sc == 'milli':
newValue = newActual / 1e-03
elif sc == 'None':
newValue = newActual
el.set("Value", str(newValue))
el.set("Scale", sc)
el.set("Actual", str(newActual))
el.set("Value", str(newValue))
el.set("Scale", sc)
el.set("Actual", str(newActual))
def actualizeAproj(self, obj_simSet, affiche=0):
for ix, pts in enumerate(obj_simSet.samplePts):
# print ix, pts
for ptVar in pts:
# Find the AnimatLab element by name
name, param = ptVar.split('.')
# print name, param
node = self.getElementByNameAproj(name)
if param == 'G':
# NOTE: the AnimatLab sim file stores G as "Value"
el = node.find('SynapticConductance')
self.asimtoaproj(el, ptVar, pts, "Value", affiche)
elif param == 'SynAmp':
# NOTE: the AnimatLab sim file stores SynAmp as "Value"
el = node.find('MaxSynapticConductance')
self.asimtoaproj(el, ptVar, pts, "Value", affiche)
elif param == 'ThreshV':
# NOTE: the AnimatLab sim file stores ThreshV as "Value"
el = node.find('PreSynapticThreshold')
self.asimtoaproj(el, ptVar, pts, "Value", affiche)
elif param == "Weight":
# NOTE: the AnimatLab sim file stores Weight as "Actual"
el = node.find('Weight')
self.asimtoaproj(el, ptVar, pts, "Actual", affiche)
elif param == 'CurrentOn':
# NOTE: the AnimatLab sim file stores CurrentOn as "Actual"
el = node.find('CurrentOn')
self.asimtoaproj(el, ptVar, pts, "Actual", affiche)
elif param == 'StartTime':
el = node.find('StartTime')
self.asimtoaproj(el, ptVar, pts, "Actual", affiche)
elif param == 'EndTime':
el = node.find('EndTime')
self.asimtoaproj(el, ptVar, pts, "Actual", affiche)
def actualizeAprojStimState(self, asimtab_stims, affiche=0):
for extStim in range(len(asimtab_stims)):
# Find the AnimatLab element by name
name = asimtab_stims[extStim][0]
state = asimtab_stims[extStim][5]
# print name, state
node = self.getElementByNameAproj(name)
el = node.find('Enabled').text
node.find('Enabled').text = str(state)
if affiche:
txt1 = str(name)
for k in range(3-(len(txt1)/8)):
txt1 += "\t"
txt2 = str(el) + " ---> "
for k in range(2-(len(txt2)/8)):
txt2 += "\t"
print txt1 + txt2 + str(state)
# print name, "\t", el, "--->", "\t", state
def actualizeAprojMotorState(self, asimtab_motorst, affiche=0):
for motorSt in range(len(asimtab_motorst)):
# Find the AnimatLab element by name
name = asimtab_motorst[motorSt][0]
state = asimtab_motorst[motorSt][5]
# print name, state
node = self.getElementByNameAproj(name)
el = node.find('Enabled').text
node.find('Enabled').text = str(state)
if affiche:
txt1 = str(name)
for k in range(3-(len(txt1)/8)):
txt1 += "\t"
txt2 = str(el) + " ---> "
for k in range(2-(len(txt2)/8)):
txt2 += "\t"
print txt1 + txt2 + str(state)
# print name, "\t", el, "--->", "\t", state
def saveXMLaproj(self, fileName='', overwrite=False):
"""
saveXMLaproj(fileName='', overwrite=False)
Saves the current AnimatLabModel object as a .aproj file with the
path name, fileName.
fileName Specifies the name of the .aproj file.
overwrite Boolean flag to overwrite an existing .aproj file.
The default file path is the project folder of the AnimatLabModel
instantiation.
Last updated: September 19, 2017
Modified by: Daniel Cattaert
"""
if fileName == '':
if overwrite:
fileNameOK = self.aprojFile
else:
saveDir = os.path.split(self.aprojFile)[0]
saveName = os.path.split(self.aprojFile)[-1]
rootName = os.path.splitext(saveName)[0]
oldname = rootName + '*.aproj'
ix = len(glob.glob(os.path.join(saveDir, oldname)))
newname = rootName + '-{0:d}.aproj'.format(ix)
fileNameOK = os.path.join(saveDir, newname)
else:
if overwrite:
saveDir = os.path.split(fileName)[0]
ficName = os.path.splitext(fileName)[0] + '.aproj'
fileNameOK = os.path.join(saveDir, ficName)
else:
saveDir = os.path.split(fileName)[0]
ficName = os.path.splitext(fileName)[0] + '*.aproj'
ix = len(glob.glob(os.path.join(saveDir, ficName)))
newname = os.path.splitext(fileName)[0] +\
'-{0:d}.aproj'.format(ix)
fileNameOK = os.path.join(saveDir, newname)
print 'Saving file: {}'.format(fileNameOK)
self.aprojtree.write(fileNameOK)
return fileNameOK
class AnimatLabSimFile(object):
def __init__(self, asimFile=''):
"""
__init__(asimFile='')
Initializes an AnimatLabSimFile class object.
asimFile Specifies full .asim file path for AnimatLab asim File
Last updated: February 20, 2017
Modified by: Daniel Cattaert
"""
# # Set root folder for AnimatLab model resource files
try:
if asimFile != '':
# # Check to see if AnimatLab asimfile exists if specified
if os.path.isfile(asimFile):
self.asimFile = asimFile
else:
error = "Specified AnimatLab simulation file does not " \
"exist: %s"
raise AnimatLabModelError(error % asimFile)
except:
pass
# Set up lookup table for asim model elements
self.tree = elementTree.parse(self.asimFile)
root = self.tree.getroot()
lookupType = []
lookupID = []
lookupName = []
lookupElement = []
def lookupAppend(el, elType):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
lookupName.append(el.find("Name").text)
lookupElement.append(el)
def lookupAppend2(el, elType):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
s = (el.find("SourceID").text)
t = (el.find("TargetID").text)
sm = np.array(lookupElement)[np.where(np.array(lookupID) == s)[0]]
if len(sm) == 1:
sname = sm[0].find("Name").text
tm = np.array(lookupElement)[np.where(np.array(lookupID) == t)[0]]
if len(tm) == 1:
tname = tm[0].find("Name").text
# lookupName.append(s + "*" + t)
lookupName.append(sname + "*" + tname)
lookupElement.append(el)
def lookupAppend3(el, elType):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
lookupName.append(el.find("ColumnName").text)
lookupElement.append(el)
def lookupAppend4(el, elType, NeuID):
lookupType.append(elType)
lookupID.append(el.find("ID").text)
s = (el.find("FromID").text)
t = NeuID
sm = np.array(lookupElement)[np.where(np.array(lookupID) == s)[0]]
if len(sm) == 1:
sname = sm[0].find("Name").text
tm = np.array(lookupElement)[np.where(np.array(lookupID) == t)[0]]
if len(tm) == 1:
tname = tm[0].find("Name").text
# lookupName.append(s + "*" + t)
lookupName.append(sname + "*" + tname)
lookupElement.append(el)
######################################################################
"""
modified August 30, 2017 (D. Cattaert) to handle Joints parameters
"""
def analyzeChilbodies(rb, level):
txt = ""
rbfound = 0
chrblist = []
for n in range(level):
txt += "\t"
# print txt, level, rb, rb.find("Name").text
el = rb.find("Joint")
if el is not None:
print txt + el.find("Name").text
lookupAppend(el, "Joint")
elt = rb.find("ChildBodies")
if elt is not None:
rbfound = 1
chrblist = list(elt)
# if rbfound == 0:
# print txt + "No childbodies"
# if rbfound == 1:
# print txt + "childbodies found",
# print txt, level, chrblist
return [rbfound, chrblist]
print "\nREADING .asim elements..."
"""
modified August 30, 2017 (D. Cattaert) to handle Joints parameters
"""
level = 0
rbfound = 0
subfound = 0
childRbNb = 0
nbsub = 1 # the first list of rigid bodies
subchrblist = []
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for rigidbodyelmt in list(organism.find("RigidBody")):
# print rigidbodyelmt
if list(rigidbodyelmt) != []:
# print list(rigidbodyelmt)
subfound = 1
rbeltlist = list(rigidbodyelmt)
subchrblist.append(rbeltlist)
childRbNb = 0
# number of child RigidBodies
while subfound:
for ch in range(nbsub):
childRbNb = 0
subfound = 0 # flag to indicate a child rb exists
# first looks for all childbodies from same parent
for rb in subchrblist[level+ch]:
[rbfound, chrblist] = analyzeChilbodies(rb,
level)
if rbfound:
childRbNb += 1
subfound = 1
# each time childbodies are found, the list
# is added to the subchrblist
subchrblist.append(chrblist)
nbsub = childRbNb
# ... continues the analysis of the parent
if subfound: # once the parent has been scanned,
level += 1 # and childbodies found, each child
# becomes parent: the process starts again
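# In short: the loop above is a breadth-first traversal of the RigidBody
# tree. Each pass collects the <Joint> of every body at the current level
# (via analyzeChilbodies) and queues its <ChildBodies>; `level` advances
# once every body of the current generation has been scanned.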
######################################################################
for el in list(root.find("ExternalStimuli")):
if el.find("Type").text == "MotorPosition":
lookupAppend(el, "MotorPosition")
for el in list(root.find("ExternalStimuli")):
if el.find("Type").text == "MotorVelocity":
lookupAppend(el, "MotorVelocity")
for el in list(root.find("ExternalStimuli")):
if el.find("Type").text == "Current":
lookupAppend(el, "ExternalStimuli")
# path = "Environment/Organisms/Organism/NervousSystem/NeuralModules"
# modules = root.find(path).getchildren()
# for module in modules:
"""
modified by Daniel Cattaert, May 2016:
to allow this module to work when a second organism is added,
the three commented lines above have been replaced by the three lines below.
"""
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
print list(ns)
for mod in ns:
if mod.find("ModuleName").text == 'IntegrateFireSim':
for el in list(mod.find("Neurons")):
lookupAppend(el, "Neurons")
elif mod.find("ModuleName").text == 'PhysicsModule':
for el in list(mod.find("Adapters")):
lookupAppend(el, "Adapters")
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
print list(ns)
for mod in ns:
if mod.find("ModuleName").text == 'IntegrateFireSim':
for syn in list(mod.find("Synapses")):
# print syn
for el in syn:
lookupAppend(el, "Synapses")
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
print list(ns)
for mod in ns:
if mod.find("ModuleName").text == 'IntegrateFireSim':
for el in list(mod.find("Connexions")):
lookupAppend2(el, "Connexions")
for el in list(root.find("DataCharts")):
lookupAppend(el, "Chart")
"""
path = "DataCharts/DataChart/DataColumns"
modules = list(root.find(path))
for el in modules:
lookupAppend3(el, "ChartcolName")
"""
ch = 0
for module in list(root.find("DataCharts")):
print module.find("Name").text
for el in list(module.find("DataColumns")):
typ = "ChartCol" + str(ch)
lookupAppend3(el, typ)
ch += 1
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
print list(ns)
for module in ns:
if module.find("ModuleName").text == 'FiringRateSim':
for el in list(module.find("Neurons")):
lookupAppend(el, "NeuronsFR")
path = "Environment/Organisms"
organisms = list(root.find(path))
for organism in organisms:
for ns in list(organism.find("NervousSystem")):
if list(ns) == []:
print "No NeuralModule"
elif list(ns) != []:
print list(ns)
for module in ns:
if module.find("ModuleName").text == 'FiringRateSim':
for el in list(module.find("Neurons")):
NeurID = el.find("ID").text
for syn in list(el.find("Synapses")):
lookupAppend4(syn, "SynapsesFR", NeurID)
self.lookup = {}
self.lookup["Type"] = lookupType
self.lookup["ID"] = lookupID
self.lookup["Name"] = lookupName
self.lookup["Element"] = lookupElement
def getElementByType(self, elType):
"""
getElementByType(elType)
Returns an array of XML elements with the type, elType
elType Options: "Neurons", "ExternalStimuli", "Adapters"
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
return np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["Type"]) == elType)[0]]
def getElementByType2(self, elType):
"""
getElementByType2(elType)
Returns an array of XML elements with the type, elType
elType Options: "Neurons", "ExternalStimuli", "Adapters"
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
return np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["Type"]) == elType)[0]]
def getElementByName(self, elName):
"""
getElementByName(elName)
Returns an XML element with the specified name, elName
elName AnimatLab name of the desired element
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
matches = np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["Name"]
) == elName)[0]]
if len(matches) > 1:
warning = "WARNING: More than one element with name found!!\n\n \
%i instance(s) with name %s"
print warning % (len(matches), elName)
return matches
elif len(matches) == 1:
return matches[0]
else:
warning = "WARNING: No matches found for elements with name:\n%s"
print warning % elName
return None
def getElementByID(self, elID):
"""
getElementByID(elID)
Returns an XML element by the AnimatLab ID
elID Specifies the AnimatLab ID of the desired element
Last updated: December 28, 2015
Modified by: Bryce Chung
"""
matches = np.array(self.lookup["Element"]
)[np.where(np.array(self.lookup["ID"]) == elID)[0]]
if len(matches) > 1:
warning = "WARNING: More than one element with ID found!!\n\n \
%i instance(s) with ID %s"
print warning % (len(matches), elID)
return matches
elif len(matches) == 1:
return matches[0]
else:
print "WARNING: No matches found for elements with ID:\n%s" % elID
return None
"""
if __name__ == '__main__':
folders = FolderOrg(subdir="ArmSPike13e2")
folders.affectDirectories()
projectFolder = folders.animatlab_commonFiles_dir
model = AnimatLabModel(folders.animatlab_commonFiles_dir)
newMeshPath = "\\\\MAC\Home\Documents\Labo\Scripts\AnimatLabV2\Human\\"
newMeshPath += "MaleSkeleton"
model.changeMeshPath(newMeshPath)
aprojSaveDir = folders.animatlab_rootFolder + "AprojFiles/"
if not os.path.exists(aprojSaveDir):
os.makedirs(aprojSaveDir)
# model.saveXMLaproj(aprojSaveDir + "ArmSpike13.aproj")
"""
|
neuRowsATL/animatLabSimulationAPI
|
class_animatLabModel.py
|
Python
|
gpl-2.0
| 56,066
|
[
"NEURON"
] |
34de3af7e1e70fed1dfac9bde1446366d5eea1076475a3c5d93c1ad12bb16a21
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=no-init
"""
This is a Python algorithm, with profile
fitting for integrating peaks.
"""
# This __future__ import is for Python 2/3 compatibility
from __future__ import (absolute_import, division, print_function)
import sys
from mantid.kernel import *
from mantid.api import *
from mantid.simpleapi import *
import numpy as np
class IntegratePeaksProfileFitting(PythonAlgorithm):
def summary(self):
return 'Fits a series of peaks using 3D profile fitting, modeling each peak as an Ikeda-Carpenter function (along TOF) multiplied by a bivariate Gaussian (across the detector face).'
def category(self):
# defines the category the algorithm will be put in the algorithm browser
return 'Crystal\\Integration'
def PyInit(self):
# Declare a property for the output workspace
self.declareProperty(WorkspaceProperty(name='OutputPeaksWorkspace',
defaultValue='',
direction=Direction.Output),
doc='PeaksWorkspace with integrated peaks')
self.declareProperty(WorkspaceProperty(name='OutputParamsWorkspace',
defaultValue='',
direction=Direction.Output),
doc='MatrixWorkspace with fit parameters')
self.declareProperty(WorkspaceProperty(name='InputWorkspace',
defaultValue='',
direction=Direction.Input),
doc='An input Sample MDHistoWorkspace or MDEventWorkspace in HKL.')
self.declareProperty(WorkspaceProperty(name='PeaksWorkspace',
defaultValue='',
direction=Direction.Input),
doc='PeaksWorkspace with peaks to be integrated.')
self.declareProperty(FileProperty(name="UBFile",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".mat"]),
doc="File containing the UB Matrix in ISAW format. Leave blank to use loaded UB Matrix.")
self.declareProperty(FileProperty(name="ModeratorCoefficientsFile",
defaultValue="",action=FileAction.OptionalLoad,
extensions=[".dat"]),
doc="File containing the Pade coefficients describing moderator emission versus energy.")
self.declareProperty(FileProperty("StrongPeakParamsFile",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".pkl"]),
doc="File containing strong peaks profiles. If left blank, strong peaks will be fit first.")
self.declareProperty("IntensityCutoff", defaultValue=0., doc="Minimum number of counts to force a profile")
edgeDocString = 'Pixels within EdgeCutoff of a detector edge will have a profile forced.'
self.declareProperty("EdgeCutoff", defaultValue=0., doc=edgeDocString)
self.declareProperty("FracStop", defaultValue=0.05, validator=FloatBoundedValidator(lower=0., exclusive=True),
doc="Fraction of max counts to include in peak selection.")
self.declareProperty("MinpplFrac", defaultValue=0.9, doc="Min fraction of predicted background level to check")
self.declareProperty("MaxpplFrac", defaultValue=1.1, doc="Max fraction of predicted background level to check")
self.declareProperty("DQMax", defaultValue=0.15, doc="Largest total side length (in Angstrom) to consider for profile fitting.")
self.declareProperty("PeakNumber", defaultValue=-1, doc="Which Peak to fit. Leave negative for all.")
def initializeStrongPeakSettings(self, strongPeaksParamsFile, peaks_ws, sampleRun, forceCutoff, edgeCutoff, numDetRows,
numDetCols):
import pickle
# Strong peak profiles - we set up the workspace and determine which peaks we'll fit.
strongPeakKeys = ['Phi', 'Theta', 'Scale3d', 'FitPhi', 'FitTheta', 'SigTheta', 'SigPhi', 'SigP', 'PeakNumber']
strongPeakDatatypes = ['float']*len(strongPeakKeys)
strongPeakParams_ws = CreateEmptyTableWorkspace(OutputWorkspace='__StrongPeakParameters')
for key, datatype in zip(strongPeakKeys,strongPeakDatatypes):
strongPeakParams_ws.addColumn(datatype, key)
# Either load the provided strong peaks file or set the flag to generate it as we go
if strongPeaksParamsFile != "":
if sys.version_info[0] == 3:
strongPeakParams = pickle.load(open(strongPeaksParamsFile, 'rb'),encoding='latin1')
else:
strongPeakParams = pickle.load(open(strongPeaksParamsFile, 'rb'))
generateStrongPeakParams = False
# A strong peaks file was provided - we don't need to generate it on the fly so we can fit in order
runNumbers = np.array(peaks_ws.column('RunNumber'))
peaksToFit = np.where(runNumbers == sampleRun)[0]
intensities = np.array(peaks_ws.column('Intens'))
rows = np.array(peaks_ws.column('Row'))
cols = np.array(peaks_ws.column('Col'))
runNumbers = np.array(peaks_ws.column('RunNumber'))
intensIDX = intensities < forceCutoff
edgeIDX = np.logical_or.reduce(np.array([rows < edgeCutoff, rows > numDetRows - edgeCutoff,
cols < edgeCutoff, cols > numDetCols - edgeCutoff]))
needsForcedProfile = np.logical_or(intensIDX, edgeIDX)
needsForcedProfileIDX = np.where(needsForcedProfile)[0]
canFitProfileIDX = np.where(~needsForcedProfile)[0]
numPeaksCanFit = len(canFitProfileIDX)
# We can populate the strongPeakParams_ws now and use that for initial BVG guesses
for row in strongPeakParams:
strongPeakParams_ws.addRow(row)
else:
generateStrongPeakParams = True
#Figure out which peaks to fit without forcing a profile and set those to be fit first
intensities = np.array(peaks_ws.column('Intens'))
rows = np.array(peaks_ws.column('Row'))
cols = np.array(peaks_ws.column('Col'))
runNumbers = np.array(peaks_ws.column('RunNumber'))
intensIDX = intensities < forceCutoff
edgeIDX = np.logical_or.reduce(np.array( [rows < edgeCutoff, rows > numDetRows - edgeCutoff,
cols < edgeCutoff, cols > numDetCols - edgeCutoff]))
needsForcedProfile = np.logical_or(intensIDX, edgeIDX)
needsForcedProfileIDX = np.where(needsForcedProfile)[0]
canFitProfileIDX = np.where(~needsForcedProfile)[0]
numPeaksCanFit = len(canFitProfileIDX)
peaksToFit = np.append(canFitProfileIDX, needsForcedProfileIDX) #Will fit in this order
peaksToFit = peaksToFit[runNumbers[peaksToFit]==sampleRun]
# Initialize our strong peaks dictionary. Set BVG Params to be None so that we fall back on
# instrument defaults until we have fit >=30 peaks.
strongPeakParams = np.empty([numPeaksCanFit, 9])
#sigX0Params, sigY0, sigP0Params = None, None, None
peaksToFit = np.append(peaksToFit, np.where(runNumbers!=sampleRun)[0])
return generateStrongPeakParams, strongPeakParams, strongPeakParams_ws, needsForcedProfile,\
needsForcedProfileIDX, canFitProfileIDX, numPeaksCanFit, peaksToFit
def getBVGInitialGuesses(self, peaks_ws, strongPeakParams_ws, minNumberPeaks=30):
"""
Returns initial guesses for the BVG fit if strongPeakParams_ws contains more than
minNumberPeaks entries. If not, we return all None, which will fall back to the
instrument defaults.
"""
if strongPeakParams_ws.rowCount() > minNumberPeaks:
# First, along the scattering direction
theta = np.abs(strongPeakParams_ws.column('Theta'))
sigma_theta = np.abs(strongPeakParams_ws.column('SigTheta'))
CreateWorkspace(DataX=theta, DataY=sigma_theta, OutputWorkspace='__ws_bvg0_scat')
Fit(Function='name=UserFunction,Formula=A/2.0*(exp(((x-x0)/b))+exp( -((x-x0)/b)))+BG,A=0.0025,x0=1.54,b=1,BG=-1.26408e-15',
InputWorkspace='__ws_bvg0_scat', Output='__fitSigX0', StartX=np.min(theta), EndX=np.max(theta))
sigX0Params = mtd['__fitSigX0_Parameters'].column(1)[:-1]
# Second, along the azimuthal. This is just a constant.
sigY0 = np.median(strongPeakParams_ws.column('SigPhi'))
# Finally, the interaction term. This we just get from the instrument file.
try:
sigP0Params = peaks_ws.getInstrument().getStringParameter("sigP0Params")
sigP0Params = np.array(str(sigP0Params).strip('[]\'').split(),dtype=float)
except:
logger.warning('Cannot find sigP0Params. Will use defaults.')
sigP0Params = [0.1460775, 1.85816592, 0.26850086, -0.00725352]
return sigX0Params, sigY0, sigP0Params
else:
return None, None, None
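# Note on the fit formula used above: A/2*(exp((x-x0)/b) + exp(-(x-x0)/b)) + BG
# is identically A*cosh((x-x0)/b) + BG, i.e. the peak width along the
# scattering direction is modeled as a hyperbolic cosine of the scattering
# angle (cf. coshPeakWidthModel used later in PyExec).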
def getUBMatrix(self, peaks_ws, UBFile):
# Load the UB Matrix if one is not already loaded
if UBFile == '' and peaks_ws.sample().hasOrientedLattice():
logger.information("Using UB file already available in PeaksWorkspace")
else:
try:
from mantid.simpleapi import LoadIsawUB
LoadIsawUB(InputWorkspace=peaks_ws, FileName=UBFile)
except:
logger.error("peaks_ws does not have a UB matrix loaded. Must provide a file")
UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
return UBMatrix
def PyExec(self):
import ICCFitTools as ICCFT
import BVGFitTools as BVGFT
from scipy.ndimage.filters import convolve
MDdata = self.getProperty('InputWorkspace').value
peaks_ws = self.getProperty('PeaksWorkspace').value
fracStop = self.getProperty('FracStop').value
dQMax = self.getProperty('DQMax').value
UBFile = self.getProperty('UBFile').value
padeFile = self.getProperty('ModeratorCoefficientsFile').value
strongPeaksParamsFile = self.getProperty('StrongPeakParamsFile').value
forceCutoff = self.getProperty('IntensityCutoff').value
edgeCutoff = self.getProperty('EdgeCutoff').value
peakNumberToFit = self.getProperty('PeakNumber').value
pplmin_frac = self.getProperty('MinpplFrac').value
pplmax_frac = self.getProperty('MaxpplFrac').value
sampleRun = peaks_ws.getPeak(0).getRunNumber()
q_frame='lab'
mtd['MDdata'] = MDdata
zBG = 1.96
neigh_length_m=3
iccFitDict = ICCFT.parseConstraints(peaks_ws) #Contains constraints and guesses for ICC Fitting
padeCoefficients = ICCFT.getModeratorCoefficients(padeFile)
# There are a few instrument specific parameters that we define here. In some cases,
# it may improve fitting to tweak these parameters, but for simplicity we define these here
# The default values are good for MaNDi - new instruments can be added by adding a different elif
# statement.
# If you change these values or add an instrument, documentation should also be changed.
try:
numDetRows = peaks_ws.getInstrument().getIntParameter("numDetRows")[0]
numDetCols = peaks_ws.getInstrument().getIntParameter("numDetCols")[0]
nPhi = peaks_ws.getInstrument().getIntParameter("numBinsPhi")[0]
nTheta = peaks_ws.getInstrument().getIntParameter("numBinsTheta")[0]
mindtBinWidth = peaks_ws.getInstrument().getNumberParameter("mindtBinWidth")[0]
maxdtBinWidth = peaks_ws.getInstrument().getNumberParameter("maxdtBinWidth")[0]
fracHKL = peaks_ws.getInstrument().getNumberParameter("fracHKL")[0]
dQPixel = peaks_ws.getInstrument().getNumberParameter("dQPixel")[0]
peakMaskSize = peaks_ws.getInstrument().getIntParameter("peakMaskSize")[0]
except:
logger.error("Cannot find all parameters in instrument parameters file.")
raise
UBMatrix = self.getUBMatrix(peaks_ws, UBFile)
dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
dQ[dQ>dQMax] = dQMax
qMask = ICCFT.getHKLMask(UBMatrix, frac=fracHKL, dQPixel=dQPixel,dQ=dQ)
generateStrongPeakParams, strongPeakParams, strongPeakParams_ws, needsForcedProfile, \
needsForcedProfileIDX, canFitProfileIDX, numPeaksCanFit, peaksToFit = \
self.initializeStrongPeakSettings(strongPeaksParamsFile, peaks_ws, sampleRun, forceCutoff, edgeCutoff, numDetRows,
numDetCols)
if peakNumberToFit>-1:
peaksToFit = [peakNumberToFit]
# Create the parameters workspace
keys = ['peakNumber','Alpha', 'Beta', 'R', 'T0', 'bgBVG', 'chiSq3d', 'chiSq', 'dQ', 'KConv', 'MuPH',
'MuTH', 'newQ', 'Scale', 'scale3d', 'SigP', 'SigX', 'SigY', 'Intens3d', 'SigInt3d']
datatypes = ['float']*len(keys)
datatypes[np.where(np.array(keys)=='newQ')[0][0]] = 'V3D'
params_ws = CreateEmptyTableWorkspace()
for key, datatype in zip(keys,datatypes):
params_ws.addColumn(datatype, key)
# And we're off!
peaks_ws_out = peaks_ws.clone()
np.warnings.filterwarnings('ignore') # There can be a lot of warnings for bad solutions that get rejected.
progress = Progress(self, 0.0, 1.0, len(peaksToFit))
sigX0Params, sigY0, sigP0Params = self.getBVGInitialGuesses(peaks_ws, strongPeakParams_ws)
for fitNumber, peakNumber in enumerate(peaksToFit):#range(peaks_ws.getNumberPeaks()):
peakNumber = int(peakNumber)
peak = peaks_ws_out.getPeak(peakNumber)
progress.report(' ')
if peak.getRunNumber() != MDdata.getExperimentInfo(0).getRunNumber():
logger.warning('Peak number %i has run number %i but MDWorkspace is from run number %i. Skipping this peak.'%(
peakNumber, peak.getRunNumber(), MDdata.getExperimentInfo(0).getRunNumber()))
continue
try:
box = ICCFT.getBoxFracHKL(peak, peaks_ws, MDdata, UBMatrix, peakNumber,
dQ, fracHKL=0.5, dQPixel=dQPixel, q_frame=q_frame)
if ~needsForcedProfile[peakNumber]:
strongPeakParamsToSend = None
else:
strongPeakParamsToSend = strongPeakParams
# Will allow forced weak and edge peaks to be fit using a neighboring peak profile
Y3D, goodIDX, pp_lambda, params = BVGFT.get3DPeak(peak, peaks_ws, box, padeCoefficients,qMask,
nTheta=nTheta, nPhi=nPhi, plotResults=False,
zBG=zBG,fracBoxToHistogram=1.0,bgPolyOrder=1,
strongPeakParams=strongPeakParamsToSend,
q_frame=q_frame, mindtBinWidth=mindtBinWidth,
maxdtBinWidth=maxdtBinWidth,
pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
forceCutoff=forceCutoff, edgeCutoff=edgeCutoff,
peakMaskSize=peakMaskSize,
iccFitDict=iccFitDict, sigX0Params=sigX0Params,
sigY0=sigY0, sigP0Params=sigP0Params, fitPenalty=1.e7)
# First we get the peak intensity
peakIDX = Y3D/Y3D.max() > fracStop
intensity = np.sum(Y3D[peakIDX])
# Now the number of background counts under the peak assuming a constant bg across the box
n_events = box.getNumEventsArray()
convBox = 1.0*np.ones([neigh_length_m, neigh_length_m,neigh_length_m]) / neigh_length_m**3
conv_n_events = convolve(n_events,convBox)
bgIDX = np.logical_and.reduce(np.array([~goodIDX, qMask, conv_n_events>0]))
bgEvents = np.mean(n_events[bgIDX])*np.sum(peakIDX)
# Now we consider the variation of the fit. These are done as three independent fits. So we need to consider
# the variance within our fit sig^2 = sum(N*(yFit-yData)) / sum(N) and scale by the number of parameters that go into
# the fit. In total: 10 (removing scale variables)
w_events = n_events.copy()
w_events[w_events==0] = 1
varFit = np.average((n_events[peakIDX]-Y3D[peakIDX])*(n_events[peakIDX]-Y3D[peakIDX]), weights=(w_events[peakIDX]))
sigma = np.sqrt(intensity + bgEvents + varFit)
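# i.e. sigma^2 = intensity + bgEvents + varFit: counting statistics for the
# peak and background plus the weighted variance of the model about the data.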
compStr = 'peak {:d}; original: {:4.2f} +- {:4.2f}; new: {:4.2f} +- {:4.2f}'.format(peakNumber,
peak.getIntensity(),
peak.getSigmaIntensity(),
intensity, sigma)
logger.information(compStr)
# Save the results
params['peakNumber'] = peakNumber
params['Intens3d'] = intensity
params['SigInt3d'] = sigma
params['newQ'] = V3D(params['newQ'][0],params['newQ'][1],params['newQ'][2])
params_ws.addRow(params)
peak.setIntensity(intensity)
peak.setSigmaIntensity(sigma)
if generateStrongPeakParams and ~needsForcedProfile[peakNumber]:
qPeak = peak.getQLabFrame()
theta = np.arctan2(qPeak[2], np.hypot(qPeak[0],qPeak[1])) #2theta
try:
p = mtd['__fitSigX0_Parameters'].column(1)[:-1]
tol = 0.2 #We should have a good idea now - only allow 20% variation
except:
p = peaks_ws.getInstrument().getStringParameter("sigSC0Params")
p = np.array(str(p).strip('[]\'').split(),dtype=float)
tol = 5.0 #High tolerance since we don't know what the answer will be
predSigX = BVGFT.coshPeakWidthModel(theta, p[0],p[1],p[2],p[3])
if np.abs((params['SigX'] - predSigX)/1./predSigX) < tol:
strongPeakParams[fitNumber, 0] = np.arctan2(qPeak[1], qPeak[0]) # phi
strongPeakParams[fitNumber, 1] = np.arctan2(qPeak[2], np.hypot(qPeak[0],qPeak[1])) #theta
strongPeakParams[fitNumber, 2] = params['scale3d']
strongPeakParams[fitNumber, 3] = params['MuTH']
strongPeakParams[fitNumber, 4] = params['MuPH']
strongPeakParams[fitNumber, 5] = params['SigX']
strongPeakParams[fitNumber, 6] = params['SigY']
strongPeakParams[fitNumber, 7] = params['SigP']
strongPeakParams[fitNumber, 8] = peakNumber
strongPeakParams_ws.addRow(strongPeakParams[fitNumber])
sigX0Params, sigY0, sigP0Params = self.getBVGInitialGuesses(peaks_ws, strongPeakParams_ws)
except KeyboardInterrupt:
np.warnings.filterwarnings('default') # Re-enable on exit
raise
except:
#raise
logger.warning('Error fitting peak number ' + str(peakNumber))
peak.setIntensity(0.0)
peak.setSigmaIntensity(1.0)
# Cleanup
for wsName in mtd.getObjectNames():
if 'fit_' in wsName or 'bvgWS' in wsName or 'tofWS' in wsName or 'scaleWS' in wsName:
mtd.remove(wsName)
np.warnings.filterwarnings('default') # Re-enable on exit
# Set the output
self.setProperty('OutputPeaksWorkspace', peaks_ws_out)
self.setProperty('OutputParamsWorkspace', params_ws)
# Register algorithm with Mantid
AlgorithmFactory.subscribe(IntegratePeaksProfileFitting)
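# Usage sketch (a hedged example; workspace names are hypothetical). Once
# registered, the algorithm is callable like any other simpleapi function:
#     IntegratePeaksProfileFitting(OutputPeaksWorkspace='peaks_fit',
#                                  OutputParamsWorkspace='fit_params',
#                                  InputWorkspace='md_ws',
#                                  PeaksWorkspace='peaks_ws')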
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/IntegratePeaksProfileFitting.py
|
Python
|
gpl-3.0
| 21,364
|
[
"CRYSTAL",
"Gaussian"
] |
47e4e2b3212663490d99421e8e6b2c5a1efb69b6ee83418db74ff2eaaf84352b
|
r"""
Linear elasticity with nodal linear combination constraints.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= - \int_{\Gamma_{right}} \ul{v} \cdot \ull{\sigma} \cdot \ul{n}
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
and :math:`\ull{\sigma} \cdot \ul{n} = \bar{p} \ull{I} \cdot \ul{n}` with given
traction pressure :math:`\bar{p}`. The constraints are given in terms of
coefficient matrices and right-hand sides, see the ``lcbcs`` keyword below. For
instance, ``'nlcbc1'`` in the 3D mesh case corresponds to
.. math::
u_0 - u_1 + u_2 = 0 \\
u_0 + 0.5 u_1 + 0.1 u_2 = 0.05
that should hold in the ``'Top'`` region.
This example demonstrates how to pass command line options to a problem
description file using ``--define`` option of ``simple.py``. Try::
python simple.py examples/linear_elasticity/nodal_lcbcs.py --define='dim: 3'
to use a 3D mesh, instead of the default 2D mesh. The example also shows that
the nodal constraints can be used in place of the Dirichlet boundary
conditions. Try::
python simple.py examples/linear_elasticity/nodal_lcbcs.py --define='use_ebcs: False'
to replace ``ebcs`` with the ``'nlcbc4'`` constraints. The results should be
the same for the two cases. Both options can be combined::
python simple.py examples/linear_elasticity/nodal_lcbcs.py --define='dim: 3, use_ebcs: False'
The :func:`post_process()` function is used both to compute the von Mises
stress and to verify the linear combination constraints.
View the 2D results using::
python postproc.py square_quad.vtk --wireframe -b
python postproc.py square_quad.vtk --wireframe -b --only-names=u -d'u,plot_displacements,rel_scaling=1,color_kind="scalars",color_name="von_mises_stress"'
View the 3D results using::
python postproc.py cube_medium_tetra.vtk --wireframe -b
python postproc.py cube_medium_tetra.vtk --wireframe -b --only-names=u -d'u,plot_displacements,rel_scaling=1,color_kind="scalars",color_name="von_mises_stress"'
"""
import numpy as nm
from sfepy.base.base import output, assert_
from sfepy.mechanics.matcoefs import stiffness_from_lame
from sfepy.mechanics.tensors import get_von_mises_stress
from sfepy import data_dir
def post_process(out, pb, state, extend=False):
"""
Calculate and output strain and stress for given displacements.
"""
from sfepy.base.base import Struct
ev = pb.evaluate
stress = ev('ev_cauchy_stress.2.Omega(m.D, u)', mode='el_avg')
vms = get_von_mises_stress(stress.squeeze())
vms.shape = (vms.shape[0], 1, 1, 1)
out['von_mises_stress'] = Struct(name='output_data', mode='cell',
data=vms, dofs=None)
dim = pb.domain.shape.dim
us = state().reshape((-1, dim))
field = pb.fields['displacement']
if dim == 2:
ii = field.get_dofs_in_region(pb.domain.regions['Top'])
output('top LCBC (u.0 - u.1 = 0):')
output('\n', nm.c_[us[ii], nm.diff(us[ii], 1)])
ii = field.get_dofs_in_region(pb.domain.regions['Bottom'])
output('bottom LCBC (u.0 + u.1 = -0.1):')
output('\n', nm.c_[us[ii], nm.sum(us[ii], 1)])
ii = field.get_dofs_in_region(pb.domain.regions['Right'])
output('right LCBC (u.0 + u.1 = linspace(0, 0.1)):')
output('\n', nm.c_[us[ii], nm.sum(us[ii], 1)])
else:
ii = field.get_dofs_in_region(pb.domain.regions['Top'])
output('top LCBC (u.0 - u.1 + u.2 = 0):')
output('\n', nm.c_[us[ii], us[ii, 0] - us[ii, 1] + us[ii, 2]])
output('top LCBC (u.0 + 0.5 u.1 + 0.1 u.2 = 0.05):')
output('\n', nm.c_[us[ii],
us[ii, 0] + 0.5 * us[ii, 1] + 0.1 * us[ii, 2]])
ii = field.get_dofs_in_region(pb.domain.regions['Bottom'])
output('bottom LCBC (u.2 - 0.1 u.1 = 0.2):')
output('\n', nm.c_[us[ii], us[ii, 2] - 0.1 * us[ii, 1]])
ii = field.get_dofs_in_region(pb.domain.regions['Right'])
output('right LCBC (u.0 + u.1 + u.2 = linspace(0, 0.1)):')
output('\n', nm.c_[us[ii], nm.sum(us[ii], 1)])
return out
def define(dim=2, use_ebcs=True):
assert_(dim in (2, 3))
if dim == 2:
filename_mesh = data_dir + '/meshes/2d/square_quad.mesh'
else:
filename_mesh = data_dir + '/meshes/3d/cube_medium_tetra.mesh'
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'post_process'
}
def get_constraints(ts, coors, region=None):
mtx = nm.ones((coors.shape[0], 1, dim), dtype=nm.float64)
rhs = nm.arange(coors.shape[0], dtype=nm.float64)[:, None]
rhs *= 0.1 / (coors.shape[0] - 1)
return mtx, rhs
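# The constraint built here applies, at every node of the region, a single
# linear combination with unit coefficients: u_0 + ... + u_{dim-1} = rhs,
# where rhs grows linearly from 0 to 0.1 across the region's nodes (this is
# the "linspace(0, 0.1)" checked in post_process above).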
functions = {
'get_constraints' : (get_constraints,),
}
fields = {
'displacement': ('real', dim, 'Omega', 1),
}
materials = {
'm' : ({
'D' : stiffness_from_lame(dim, lam=5.769, mu=3.846),
},),
'load' : ({'val' : -1.0},),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
regions = {
'Omega' : 'all',
'Bottom' : ('vertices in (y < -0.499) -v r.Left', 'facet'),
'Top' : ('vertices in (y > 0.499) -v r.Left', 'facet'),
'Left' : ('vertices in (x < -0.499)', 'facet'),
'Right' : ('vertices in (x > 0.499) -v (r.Bottom +v r.Top)', 'facet'),
}
if dim == 2:
lcbcs = {
'nlcbc1' : ('Top', {'u.all' : None}, None, 'nodal_combination',
([[1.0, -1.0]], [0.0])),
'nlcbc2' : ('Bottom', {'u.all' : None}, None, 'nodal_combination',
([[1.0, 1.0]], [-0.1])),
'nlcbc3' : ('Right', {'u.all' : None}, None, 'nodal_combination',
'get_constraints'),
}
else:
lcbcs = {
'nlcbc1' : ('Top', {'u.all' : None}, None, 'nodal_combination',
([[1.0, -1.0, 1.0], [1.0, 0.5, 0.1]], [0.0, 0.05])),
'nlcbc2' : ('Bottom', {'u.[2,1]' : None}, None, 'nodal_combination',
([[1.0, -0.1]], [0.2])),
'nlcbc3' : ('Right', {'u.all' : None}, None, 'nodal_combination',
'get_constraints'),
}
if use_ebcs:
ebcs = {
'fix' : ('Left', {'u.all' : 0.0}),
}
else:
ebcs = {}
lcbcs.update({
'nlcbc4' : ('Left', {'u.all' : None}, None, 'nodal_combination',
(nm.eye(dim), nm.zeros(dim))),
})
equations = {
'elasticity' : """
dw_lin_elastic.2.Omega(m.D, v, u)
= -dw_surface_ltr.2.Right(load.val, v)
""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
return locals()
|
RexFuzzle/sfepy
|
examples/linear_elasticity/nodal_lcbcs.py
|
Python
|
bsd-3-clause
| 7,086
|
[
"VTK"
] |
f7edbb0bdd5681f763c44db069ff050e84075bea7e0107a75f412dafbe3dc0c6
|
#!/usr/bin/env python
#=============================================================================================
# Analyze datafiles produced by YANK.
#=============================================================================================
#=============================================================================================
# REQUIREMENTS
#
# The netcdf4-python module is now used to provide netCDF v4 support:
# http://code.google.com/p/netcdf4-python/
#
# This requires NetCDF with version 4 and multithreading support, as well as HDF5.
#=============================================================================================
import os
import os.path
import sys
import math
import numpy as np
import netCDF4 as netcdf # netcdf4-python
from pymbar import MBAR # multistate Bennett acceptance ratio
from pymbar import timeseries # for statistical inefficiency analysis
import simtk.unit as units
import logging
logger = logging.getLogger(__name__)
#=============================================================================================
# PARAMETERS
#=============================================================================================
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
#=============================================================================================
# SUBROUTINES
#=============================================================================================
def show_mixing_statistics(ncfile, cutoff=0.05, nequil=0):
"""
Print summary of mixing statistics.
Parameters
----------
ncfile : netCDF4.Dataset
NetCDF file
cutoff : float, optional, default=0.05
Only transition probabilities above 'cutoff' will be printed
nequil : int, optional, default=0
If specified, only samples nequil:end will be used in analysis
"""
# Get dimensions.
niterations = ncfile.variables['states'].shape[0]
nstates = ncfile.variables['states'].shape[1]
# Compute statistics of transitions.
Nij = np.zeros([nstates,nstates], np.float64)
for iteration in range(nequil, niterations-1):
for ireplica in range(nstates):
istate = ncfile.variables['states'][iteration,ireplica]
jstate = ncfile.variables['states'][iteration+1,ireplica]
Nij[istate,jstate] += 0.5
Nij[jstate,istate] += 0.5
Tij = np.zeros([nstates,nstates], np.float64)
for istate in range(nstates):
Tij[istate,:] = Nij[istate,:] / Nij[istate,:].sum()
# Print observed transition probabilities.
print "Cumulative symmetrized state mixing transition matrix:"
print "%6s" % "",
for jstate in range(nstates):
print "%6d" % jstate,
print ""
for istate in range(nstates):
print "%-6d" % istate,
for jstate in range(nstates):
P = Tij[istate,jstate]
if (P >= cutoff):
print "%6.3f" % P,
else:
print "%6s" % "",
print ""
# Estimate second eigenvalue and equilibration time.
mu = np.linalg.eigvals(Tij)
mu = -np.sort(-mu) # sort in descending order
if (mu[1] >= 1):
logger.info("Perron eigenvalue is unity; Markov chain is decomposable.")
else:
logger.info("Perron eigenvalue is %9.5f; state equilibration timescale is ~ %.1f iterations" % (mu[1], 1.0 / (1.0 - mu[1])))
return
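# Reading the numbers above: Tij is row-stochastic (each row of symmetrized
# counts Nij is normalized by its row sum), and for the second-largest
# eigenvalue mu_2 the equilibration timescale is ~ 1/(1 - mu_2) iterations;
# e.g. mu_2 = 0.9 gives ~10 iterations.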
def estimate_free_energies(ncfile, ndiscard=0, nuse=None):
"""
Estimate free energies of all alchemical states.
Parameters
----------
ncfile : NetCDF
Input YANK netcdf file
ndiscard : int, optional, default=0
Number of iterations to discard to equilibration
nuse : int, optional, default=None
Maximum number of iterations to use (after discarding)
TODO
----
* Automatically determine 'ndiscard'.
"""
# Get current dimensions.
niterations = ncfile.variables['energies'].shape[0]
nstates = ncfile.variables['energies'].shape[1]
natoms = ncfile.variables['energies'].shape[2]
# Extract energies.
logger.info("Reading energies...")
energies = ncfile.variables['energies']
u_kln_replica = np.zeros([nstates, nstates, niterations], np.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
logger.info("Done.")
# Deconvolute replicas
logger.info("Deconvoluting replicas...")
u_kln = np.zeros([nstates, nstates, niterations], np.float64)
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
logger.info("Done.")
# Compute total negative log probability over all iterations.
u_n = np.zeros([niterations], np.float64)
for iteration in range(niterations):
u_n[iteration] = np.sum(np.diagonal(u_kln[:,:,iteration]))
#logger.info(u_n)  # DEBUG
outfile = open('u_n.out', 'w')
for iteration in range(niterations):
outfile.write("%8d %24.3f\n" % (iteration, u_n[iteration]))
outfile.close()
# Discard initial data to equilibration.
u_kln_replica = u_kln_replica[:,:,ndiscard:]
u_kln = u_kln[:,:,ndiscard:]
u_n = u_n[ndiscard:]
# Truncate to the number of specified conformations to use
if (nuse):
u_kln_replica = u_kln_replica[:,:,0:nuse]
u_kln = u_kln[:,:,0:nuse]
u_n = u_n[0:nuse]
# Subsample data to obtain uncorrelated samples
N_k = np.zeros(nstates, np.int32)
indices = timeseries.subsampleCorrelatedData(u_n) # indices of uncorrelated samples
#print u_n # DEBUG
#indices = range(0,u_n.size) # DEBUG - assume samples are uncorrelated
N = len(indices) # number of uncorrelated samples
N_k[:] = N
u_kln[:,:,0:N] = u_kln[:,:,indices]
logger.info("number of uncorrelated samples:")
logger.info(N_k)
logger.info("")
#===================================================================================================
# Estimate free energy difference with MBAR.
#===================================================================================================
# Initialize MBAR (computing free energy estimates, which may take a while)
logger.info("Computing free energy differences...")
mbar = MBAR(u_kln, N_k)
# Get matrix of dimensionless free energy differences and uncertainty estimate.
logger.info("Computing covariance matrix...")
(Deltaf_ij, dDeltaf_ij) = mbar.getFreeEnergyDifferences()
# # Matrix of free energy differences
logger.info("Deltaf_ij:")
for i in range(nstates):
for j in range(nstates):
print "%8.3f" % Deltaf_ij[i,j],
print ""
# print Deltaf_ij
# # Matrix of uncertainties in free energy differences (expected standard deviations of the estimator about the true free energies)
logger.info("dDeltaf_ij:")
for i in range(nstates):
for j in range(nstates):
print "%8.3f" % dDeltaf_ij[i,j],
print ""
# Return free energy differences and an estimate of the covariance.
return (Deltaf_ij, dDeltaf_ij)
def estimate_enthalpies(ncfile, ndiscard=0, nuse=None):
"""
Estimate enthalpies of all alchemical states.
Parameters
----------
ncfile : NetCDF
Input YANK netcdf file
ndiscard : int, optional, default=0
Number of iterations to discard to equilibration
nuse : int, optional, default=None
Number of iterations to use (after discarding)
TODO
----
* Automatically determine 'ndiscard'.
* Combine some functions with estimate_free_energies.
"""
# Get current dimensions.
niterations = ncfile.variables['energies'].shape[0]
nstates = ncfile.variables['energies'].shape[1]
natoms = ncfile.variables['energies'].shape[2]
# Extract energies.
logger.info("Reading energies...")
energies = ncfile.variables['energies']
u_kln_replica = np.zeros([nstates, nstates, niterations], np.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
logger.info("Done.")
# Deconvolute replicas
logger.info("Deconvoluting replicas...")
u_kln = np.zeros([nstates, nstates, niterations], np.float64)
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
logger.info("Done.")
# Compute total negative log probability over all iterations.
u_n = np.zeros([niterations], np.float64)
for iteration in range(niterations):
u_n[iteration] = np.sum(np.diagonal(u_kln[:,:,iteration]))
#print u_n
# DEBUG
outfile = open('u_n.out', 'w')
for iteration in range(niterations):
outfile.write("%8d %24.3f\n" % (iteration, u_n[iteration]))
outfile.close()
# Discard initial data to equilibration.
u_kln_replica = u_kln_replica[:,:,ndiscard:]
u_kln = u_kln[:,:,ndiscard:]
u_n = u_n[ndiscard:]
# Truncate to number of specified conformations to use
if (nuse):
u_kln_replica = u_kln_replica[:,:,0:nuse]
u_kln = u_kln[:,:,0:nuse]
u_n = u_n[0:nuse]
# Subsample data to obtain uncorrelated samples
N_k = np.zeros(nstates, np.int32)
indices = timeseries.subsampleCorrelatedData(u_n) # indices of uncorrelated samples
#print u_n # DEBUG
#indices = range(0,u_n.size) # DEBUG - assume samples are uncorrelated
N = len(indices) # number of uncorrelated samples
N_k[:] = N
u_kln[:,:,0:N] = u_kln[:,:,indices]
logger.info("number of uncorrelated samples:")
logger.info(N_k)
logger.info("")
# Compute average enthalpies.
H_k = np.zeros([nstates], np.float64) # H_i[i] is estimated enthalpy of state i
dH_k = np.zeros([nstates], np.float64)
for k in range(nstates):
H_k[k] = u_kln[k,k,:].mean()
dH_k[k] = u_kln[k,k,:].std() / np.sqrt(N)
return (H_k, dH_k)
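# The estimator above is the simple time average H_k = <u_kk>, with standard
# error sigma_k / sqrt(N); this is only meaningful because the samples were
# subsampled to be (approximately) uncorrelated a few lines earlier.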
def extract_u_n(ncfile):
"""
Extract timeseries of u_n = - log q(X_n) from store file
where q(X_n) = \prod_{k=1}^K e^{-u_{s_{nk}}(x_{nk})},
X_n = [x_{n1}, ..., x_{nK}] is the current collection of replica configurations,
s_{nk} is the current state of replica k at iteration n, and
u_k(x) is the kth reduced potential
Parameters
----------
ncfile : str
The filename of the repex NetCDF file.
Returns
-------
u_n : numpy array of numpy.float64
u_n[n] is -log q(X_n)
TODO
----
Move this to repex.
"""
# Get current dimensions.
niterations = ncfile.variables['energies'].shape[0]
nstates = ncfile.variables['energies'].shape[1]
natoms = ncfile.variables['energies'].shape[2]
# Extract energies.
logger.info("Reading energies...")
energies = ncfile.variables['energies']
u_kln_replica = np.zeros([nstates, nstates, niterations], np.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
logger.info("Done.")
# Deconvolute replicas
logger.info("Deconvoluting replicas...")
u_kln = np.zeros([nstates, nstates, niterations], np.float64)
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
logger.info("Done.")
# Compute total negative log probability over all iterations.
u_n = np.zeros([niterations], np.float64)
for iteration in range(niterations):
u_n[iteration] = np.sum(np.diagonal(u_kln[:,:,iteration]))
return u_n
#=============================================================================================
# SHOW STATUS OF STORE FILES
#=============================================================================================
def print_status(store_directory, verbose=False):
"""
Print a quick summary of simulation progress.
Parameters
----------
store_directory : string
The location of the NetCDF simulation output files.
verbose : bool, optional, default=False
Returns
-------
success : bool
True is returned on success; False if some files could not be read.
"""
# Process each netcdf file.
phases = ['solvent', 'complex']
for phase in phases:
# Construct full path to NetCDF file.
fullpath = os.path.join(store_directory, phase + '.nc')
# Check that the file exists.
if (not os.path.exists(fullpath)):
# Report failure.
print "File %s not found." % fullpath
print "Check to make sure the right directory was specified, and 'yank setup' has been run."
return False
# Open NetCDF file for reading.
if verbose: print "Opening NetCDF trajectory file '%(fullpath)s' for reading..." % vars()
ncfile = netcdf.Dataset(fullpath, 'r')
# Read dimensions.
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
# Print summary.
print "%s" % phase
print " %8d iterations completed" % niterations
print " %8d alchemical states" % nstates
print " %8d atoms" % natoms
# TODO: Print average ns/day and estimated completion time.
# Close file.
ncfile.close()
return True
#=============================================================================================
# ANALYZE STORE FILES
#=============================================================================================
def analyze(source_directory, verbose=False):
"""
Analyze contents of store files to compute free energy differences.
Parameters
----------
source_directory : string
The location of the NetCDF simulation storage files.
verbose : bool, optional, default=False
If True, verbose output will be generated.
"""
# Turn on debug info.
# TODO: Control verbosity of logging output using verbose optional flag.
logging.basicConfig(level=logging.DEBUG)
# Storage for different phases.
data = dict()
phase_prefixes = ['solvent', 'complex']
suffixes = ['explicit', 'implicit']
# Process each netcdf file.
for phase in phase_prefixes:
for suffix in suffixes:
# Construct full path to NetCDF file.
fullpath = os.path.join(source_directory, '%s-%s.nc' % (phase, suffix))
if verbose: print "Attempting to open %s..." % fullpath
# Skip if the file doesn't exist.
if (not os.path.exists(fullpath)): continue
# Open NetCDF file for reading.
logger.info("Opening NetCDF trajectory file '%(fullpath)s' for reading..." % vars())
ncfile = netcdf.Dataset(fullpath, 'r')
# DEBUG
logger.info("dimensions:")
for dimension_name in ncfile.dimensions.keys():
logger.info("%16s %8d" % (dimension_name, len(ncfile.dimensions[dimension_name])))
# Read dimensions.
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
logger.info("Read %(niterations)d iterations, %(nstates)d states" % vars())
# Read reference PDB file.
#if phase in ['vacuum', 'solvent']:
# reference_pdb_filename = os.path.join(source_directory, "ligand.pdb")
#else:
# reference_pdb_filename = os.path.join(source_directory, "complex.pdb")
#atoms = read_pdb(reference_pdb_filename)
# Check to make sure no self-energies go nan.
#check_energies(ncfile, atoms)
# Check to make sure no positions are nan
#check_positions(ncfile)
# Choose number of samples to discard to equilibration
# TODO: Switch to pymbar.timeseries module.
from pymbar import timeseries
u_n = extract_u_n(ncfile)
[nequil, g_t, Neff_max] = timeseries.detectEquilibration(u_n)
logger.info([nequil, Neff_max])
# Examine acceptance probabilities.
show_mixing_statistics(ncfile, cutoff=0.05, nequil=nequil)
# Estimate free energies.
(Deltaf_ij, dDeltaf_ij) = estimate_free_energies(ncfile, ndiscard = nequil)
# Estimate average enthalpies
(DeltaH_i, dDeltaH_i) = estimate_enthalpies(ncfile, ndiscard = nequil)
# Accumulate free energy differences
entry = dict()
entry['DeltaF'] = Deltaf_ij[0,nstates-1]
entry['dDeltaF'] = dDeltaf_ij[0,nstates-1]
entry['DeltaH'] = DeltaH_i[nstates-1] - DeltaH_i[0]
entry['dDeltaH'] = np.sqrt(dDeltaH_i[0]**2 + dDeltaH_i[nstates-1]**2)
data[phase] = entry
# Get temperatures.
ncvar = ncfile.groups['thermodynamic_states'].variables['temperatures']
temperature = ncvar[0] * units.kelvin
kT = kB * temperature
# Close input NetCDF file.
ncfile.close()
# Compute hydration free energy (free energy of transfer from vacuum to water)
#DeltaF = data['vacuum']['DeltaF'] - data['solvent']['DeltaF']
#dDeltaF = numpy.sqrt(data['vacuum']['dDeltaF']**2 + data['solvent']['dDeltaF']**2)
#print "Hydration free energy: %.3f +- %.3f kT (%.3f +- %.3f kcal/mol)" % (DeltaF, dDeltaF, DeltaF * kT / units.kilocalories_per_mole, dDeltaF * kT / units.kilocalories_per_mole)
# Compute enthalpy of transfer from vacuum to water
#DeltaH = data['vacuum']['DeltaH'] - data['solvent']['DeltaH']
#dDeltaH = numpy.sqrt(data['vacuum']['dDeltaH']**2 + data['solvent']['dDeltaH']**2)
#print "Enthalpy of hydration: %.3f +- %.3f kT (%.3f +- %.3f kcal/mol)" % (DeltaH, dDeltaH, DeltaH * kT / units.kilocalories_per_mole, dDeltaH * kT / units.kilocalories_per_mole)
# Read standard state correction free energy.
DeltaF_restraints = 0.0
phase = 'complex'
fullpath = os.path.join(source_directory, phase + '.nc')
ncfile = netcdf.Dataset(fullpath, 'r')
DeltaF_restraints = ncfile.groups['metadata'].variables['standard_state_correction'][0]
ncfile.close()
# Compute binding free energy.
DeltaF = data['solvent']['DeltaF'] - DeltaF_restraints - data['complex']['DeltaF']
dDeltaF = np.sqrt(data['solvent']['dDeltaF']**2 + data['complex']['dDeltaF']**2)
logger.info("")
logger.info("Binding free energy : %16.3f +- %.3f kT (%16.3f +- %.3f kcal/mol)" % (DeltaF, dDeltaF, DeltaF * kT / units.kilocalories_per_mole, dDeltaF * kT / units.kilocalories_per_mole))
logger.info("")
#logger.info("DeltaG vacuum : %16.3f +- %.3f kT" % (data['vacuum']['DeltaF'], data['vacuum']['dDeltaF']))
logger.info("DeltaG solvent : %16.3f +- %.3f kT" % (data['solvent']['DeltaF'], data['solvent']['dDeltaF']))
logger.info("DeltaG complex : %16.3f +- %.3f kT" % (data['complex']['DeltaF'], data['complex']['dDeltaF']))
logger.info("DeltaG restraint : %16.3f kT" % DeltaF_restraints)
logger.info("")
# Compute binding enthalpy
DeltaH = data['solvent']['DeltaH'] - DeltaF_restraints - data['complex']['DeltaH']
dDeltaH = np.sqrt(data['solvent']['dDeltaH']**2 + data['complex']['dDeltaH']**2)
logger.info("Binding enthalpy : %16.3f +- %.3f kT (%16.3f +- %.3f kcal/mol)" % (DeltaH, dDeltaH, DeltaH * kT / units.kilocalories_per_mole, dDeltaH * kT / units.kilocalories_per_mole))
|
luirink/yank
|
Yank/analyze-old.py
|
Python
|
lgpl-3.0
| 19,753
|
[
"NetCDF"
] |
3505ddf1288b829c6c10b59da6397ab9903cc0b06cce990eab71e42f0e710910
|
import numpy
import chainerx
# TODO(sonots): Implement in C++, especially in CUDA
def normal(*args, **kwargs):
"""normal(*args, **kwargs, device=None)
Draws random samples from a normal (Gaussian) distribution.
This is currently equivalent to :func:`numpy.random.normal`
wrapped by :func:`chainerx.array`, given the device argument.
.. seealso:: :func:`numpy.random.normal`
"""
device = kwargs.pop('device', None)
a = numpy.random.normal(*args, **kwargs)
return chainerx.array(a, device=device, copy=False)
# TODO(sonots): Implement in C++, especially in CUDA
def uniform(*args, **kwargs):
"""uniform(*args, **kwargs, device=None)
Draws samples from a uniform distribution.
    This is currently equivalent to :func:`numpy.random.uniform`
wrapped by :func:`chainerx.array`, given the device argument.
.. seealso:: :func:`numpy.random.uniform`
"""
device = kwargs.pop('device', None)
a = numpy.random.uniform(*args, **kwargs)
return chainerx.array(a, device=device, copy=False)
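# A minimal usage sketch (illustrative only; assumes chainerx is built and
# importable). Both wrappers accept the same arguments as their NumPy
# counterparts, plus an optional device keyword.
if __name__ == '__main__':
    x = normal(0.0, 1.0, size=(4,))
    u = uniform(-1.0, 1.0, size=(4,))
    print(x.shape, u.shape)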
|
okuta/chainer
|
chainerx/random/distributions.py
|
Python
|
mit
| 1,057
|
[
"Gaussian"
] |
c3be5c3455e883dd408d79b551c93bec543908728dccae9657fed73aa24e70a5
|
import logging
import numpy
from reikna.algorithms.pureparallel import PureParallel
from reikna.core.signature import Parameter, Annotation
import neuro
log = logging.getLogger("lwta")
def lwta(ctx, mat, lwta_size):
kernel_cache = ctx.kernel_cache
lwta_size = numpy.float32(lwta_size)
thread = ctx.thread
key = (lwta, mat.dtype, mat.shape, lwta_size)
if key not in kernel_cache.keys():
num_units = mat.shape[1]
log.info("compiling " + str(key))
kernel = PureParallel(
[
Parameter('mat', Annotation(mat, 'io'))
],
"""
SIZE_T this_idx = ${idxs[1]};
SIZE_T group_size = ${lwta_size};
// only the first thread per group computes anything
if (this_idx % group_size == 0) {
SIZE_T argmax = ${idxs[1]};
SIZE_T candidate_idx;
${mat.ctype} ma = ${mat.load_same};
${mat.ctype} candidate_value;
// find the argmax in the group
for (SIZE_T i=1; i < group_size; i++) {
candidate_idx = this_idx + i;
if (candidate_idx >= ${num_units}) break;
candidate_value = ${mat.load_idx}(${idxs[0]}, candidate_idx);
if ( candidate_value > ma) {
ma = candidate_value;
argmax = candidate_idx;
}
}
// second pass: zero all except argmax
for (SIZE_T i=0; i < group_size; i++) {
candidate_idx = this_idx + i;
if (candidate_idx >= ${num_units}) break;
if ( candidate_idx != argmax ) {
${mat.store_idx}(${idxs[0]}, candidate_idx, 0.0f);
}
}
}
""", guiding_array='mat', render_kwds=dict(lwta_size=lwta_size, num_units=num_units))
kernel_cache[key] = kernel.compile(thread)
kernel_cache[key](mat)
class LWTANetwork(object):
"""
Groups each layer's neurons. Only the neuron with maximum activation per group may be active.
"""
def __init__(self, **kwargs):
super(LWTANetwork, self).__init__(**kwargs)
log.info("LWTANetwork constructor")
self.lwta_sizes = [0] # no LWTA for the inputs
def add_layer(self, *args, **kwargs):
super(LWTANetwork, self).add_layer(*args, **kwargs)
lwta_size = kwargs.get('lwta', 0)
self.lwta_sizes.append(lwta_size)
def after_activation(self, layer_index, state, **kwargs):
'''
LWTA is applied AFTER the activation/transfer function has been
applied. (see page 3 in http://www.idsia.ch/idsiareport/IDSIA-04-13.pdf)
:param layer_index: the index of the current layer
:param state: network state
'''
super(LWTANetwork, self).after_activation(layer_index, state, **kwargs)
lwta_size = self.lwta_sizes[layer_index]
if lwta_size > 1:
activations = state.activations[layer_index]
lwta(self.context, activations, lwta_size)
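# A pure-NumPy reference of the same LWTA rule (a sketch for sanity-checking
# the kernel, not part of the GPU path): within each consecutive group of
# lwta_size columns, only the maximum entry survives; the rest are zeroed.
def lwta_reference(mat, lwta_size):
    out = numpy.zeros_like(mat)
    rows = numpy.arange(mat.shape[0])
    for start in range(0, mat.shape[1], lwta_size):
        block = mat[:, start:start + lwta_size]
        argmax = block.argmax(axis=1)
        # keep only the per-row maximum of this group
        out[rows, start + argmax] = block[rows, argmax]
    return out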
|
schreon/neuronaut
|
neuro/lwta.py
|
Python
|
mit
| 3,116
|
[
"NEURON"
] |
ba21e2ffd10afbd8def5f0b1eb6946df32b5b44a6df3192ba555953196023187
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exoline - Exosite IoT Command Line
https://github.com/exosite/exoline
Usage:
exo [--help] [options] <command> [<args> ...]
Commands:
{{ command_list }}
Options:
--host=<host> OneP host. Default is $EXO_HOST or m2.exosite.com
--port=<port> OneP port. Default is $EXO_PORT or 443
  -c --config=<file>   Config file. Default is $EXO_CONFIG or ~/.exoline
--httptimeout=<sec> HTTP timeout [default: 60] (default for copy is 480)
--https Enable HTTPS (deprecated, HTTPS is default)
--http Disable HTTPS
--useragent=<ua> Set User-Agent Header for outgoing requests
--debug Show debug info (stack traces on exceptions)
-d --debughttp Turn on debug level logging in pyonep
--curl Show curl calls for requests. Implies --debughttp
--discreet Obfuscate RIDs in stdout and stderr
-e --clearcache Invalidate Portals cache after running command
--portals=<server> Portals server [default: https://portals.exosite.com]
-t --vendortoken=<vt> Vendor token (/admin/home in Portals)
-n --vendor=<vendor> Vendor identifier (/admin/managemodels in Portals)
(See http://github.com/exosite/exoline#provisioning)
-h --help Show this screen
-v --version Show version
See 'exo <command> --help' for more information on a specific command.
"""
# Copyright (c) 2015, Exosite, LLC
# All rights reserved
from __future__ import unicode_literals
import sys
import os
import json
if sys.version_info < (3, 0):
import unicodecsv as csv
else:
import csv
import platform
import re
from datetime import datetime
from datetime import timedelta
import time
from pprint import pprint
from operator import itemgetter
import logging
from collections import defaultdict
import copy
import difflib
import warnings
import six
from six import StringIO
from six import iteritems
from six import string_types
# python 2.6 support
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import itertools
import math
import glob
from docopt import docopt
from dateutil import parser
from dotenv import Dotenv
import requests
import yaml
import importlib
import humanize
from pyonep import onep
from pyonep import provision
import pyonep
try:
from ..exoline import __version__
from ..exoline.exocommon import ExoException
from ..exoline import exocommon
from ..exoline import serieswriter
except:
from exoline import __version__
from exoline.exocommon import ExoException
from exoline import exocommon
from exoline import serieswriter
DEFAULT_HOST = 'm2.exosite.com'
DEFAULT_PORT = '80'
DEFAULT_PORT_HTTPS = '443'
DEFAULT_CONFIG = '~/.exoline'
SCRIPT_LIMIT_BYTES = 16 * 1024
PERF_DATA = []
cmd_doc = OrderedDict([
('read',
'''Read data from a resource.\n\nUsage:
exo [options] read <cik> [<rid> ...]
Command options:
--follow continue reading (ignores --end)
--limit=<limit> number of data points to read [default: 1]
--start=<time>
--end=<time> start and end times (see details below)
--tz=<TZ> Olson TZ name
--sort=<order> asc or desc [default: desc]
--selection=all|autowindow|givenwindow downsample method [default: all]
--format=csv|raw output format [default: csv]
--timeformat=unix|human|iso8601|excel
unix timestamp, human-readable, or spreadsheet-
compatible? [default: human]
--header=name|rid include a header row
--chunksize=<size> [default: 212] break read into requests of
length <size>, printing data as it is received.
{{ helpoption }}
If <rid> is omitted, reads all datasources and datarules under <cik>.
All output is in UTC.
{{ startend }}'''),
('write',
'''Write data at the current time.\n\nUsage:
exo [options] write <cik> [<rid>] --value=<value>
exo [options] write <cik> [<rid>] -
The - form takes the value to write from stdin. For example:
$ echo '42' | exo write 8f21f0189b9acdc82f7ec28dc0c54ccdf8bc5ade myDataport -'''),
('record',
'''Write data at a specified time.\n\nUsage:
exo [options] record <cik> [<rid>...] [-]
exo [options] record <cik> [<rid>] (--value=<timestamp,value> ...)
exo [options] record <cik> [<rid>] --interval=<seconds> ((--value=<value> ...) | -)
Can take a CSV file on STDIN and record the values to dataports. The first
column of the file must be unix timestamps for each row. The remaining columns
are data to be recorded at those timestamps, identified by the <rid> arguments.
The CSV must not have a header row.
For example:
$ exo record aCIK dpA dpB dpC - < my.csv
This reads the CSV file my.csv, which has four columns, and records the data
into the dataports with aliases dpA, dpB, and dpC on the shortcut aCIK.
Command options:
--interval generates timestamps at a regular interval into the past.
--chunksize=<lines> [default: 212] break record into requests of length <lines>
'''),
('create',
'''Create a resource from a json description passed on stdin (with -),
or using command line shorthand (other variants).\n\nUsage:
exo [options] create <cik> (--type=client|clone|dataport|datarule|dispatch) -
exo [options] create <cik> --type=client
exo [options] create <cik> --type=dataport (--format=float|integer|string)
Command options:
    --name=<name>   set a resource name (overwriting the one in stdin if present)
--alias=<alias> set an alias
--ridonly output the RID by itself on a line
--cikonly output the CIK by itself on a line (--type=client only)
{{ helpoption }}
Details:
Pass - and a json description object on stdin, or leave it off to use defaults.
Description is documented here:
https://github.com/exosite/docs/tree/master/rpc#create-client
https://github.com/exosite/docs/tree/master/rpc#create-dataport
https://github.com/exosite/docs/tree/master/rpc#create-datarule
If - is not present, creates a resource with common defaults.'''),
('listing',
'''List the RIDs of a client's children.\n\nUsage:
exo [options] listing <cik> [<rid>]
Command options:
--types=<type1>,... which resource types to list
[default: client,dataport,datarule,dispatch]
--filters=<f1>,... criteria for which resources to include
[default: owned]
activated resources shared with and activated
by client (<cik>)
aliased resources aliased by client (<cik>)
owned resources owned by client (<cik>)
public public resources
--tagged=<tag1>,... resources that have been tagged by any client, and
that the client (<cik>) has read access to.
--plain show only the child RIDs
--pretty pretty print output'''),
# ('whee',
# '''Super-fast info tree.\n\nUsage:
# exo [options] whee <cik>'''),
('info',
'''Get metadata for a resource in json format.\n\nUsage:
exo [options] info <cik> [<rid>]
Command options:
--cikonly print CIK by itself
--pretty pretty print output
--recursive embed info for any children recursively
--level=<num> number of levels to recurse through the client tree
--include=<key list>
--exclude=<key list>
comma separated list of info keys to include and exclude.
Available keys are aliases, basic, counts, description,
key, shares, subscribers, tags, usage. If omitted,
all available keys are returned.'''),
('update',
'''Update a resource from a json description passed on stdin.\n\nUsage:
exo [options] update <cik> <rid> -
For details see https://github.com/exosite/docs/tree/master/rpc#update'''),
('map',
'''Add an alias to a resource.\n\nUsage:
exo [options] map <cik> <rid> <alias>'''),
('unmap',
'''Remove an alias from a resource.\n\nUsage:
exo [options] unmap <cik> <alias>'''),
('lookup',
'''Look up a resource's RID based on its alias or CIK.\n\nUsage:
exo [options] lookup <cik> [<alias>]
exo [options] lookup <cik> --owner-of=<rid>
exo [options] lookup <cik> --share=<code>
exo [options] lookup <cik> --cik=<cik-to-find>
If <alias> is omitted, the rid for <cik> is returned. This is equivalent to:
exo lookup <cik> ""
The --owner-of variant returns the RID of the immediate parent (owner)
of <rid>.
The --share variant returns the RID associated with a share code'''),
('drop',
'''Drop (permanently delete) a resource.\n\nUsage:
exo [options] drop <cik> [<rid> ...]
Command options:
--all-children drop all children of the resource.
{{ helpoption }}
Warning: if the resource is a client with a serial number
associated with it, the serial number is not released.'''),
('flush',
'''Remove time series data from a resource.\n\nUsage:
exo [options] flush <cik> [<rid>]
Command options:
--start=<time> flush all points newer than <time> (exclusive)
--end=<time> flush all points older than <time> (exclusive)
If --start and --end are both omitted, all points are flushed.'''),
('usage',
'''Display usage of One Platform resources over a time period.\n\nUsage:
exo [options] usage <cik> [<rid>] --start=<time> [--end=<time>]
{{ startend }}'''),
('tree', '''Display a resource's descendants.\n\nUsage:
exo [options] tree [--verbose] [--values] <cik>
Command options:
--level=<num> depth to traverse, omit or -1 for no limit [default: -1]'''),
('twee', '''Display a resource's descendants. Like tree, but more wuvable.\n\nUsage:
exo [options] twee <cik>
Command options:
--nocolor don't use color in output (color is always off in Windows)
--level=<num> depth to traverse, omit or -1 for no limit [default: -1]
    --rids show RIDs instead of CIKs below the top level
Example:
$ exo twee 7893635162b84f78e4475c2d6383645659545344
Temporary CIK cl cik: 7893635162b84f78e4475c2d6383645659545344
├─ dp.i rid.098f1: 77 (just now)
└─ dp.s config: {"a":1,"b":2} (21 seconds ago)
$ exo read 7893635162b84f78e4475c2d6383645659545344 rid.098f1
2014-09-12 13:48:28-05:00,77
$ exo read 7893635162b84f78e4475c2d6383645659545344 config --format=raw
{"a":1,"b":2}
$ exo info 7893635162b84f78e4475c2d6383645659545344 --include=description --pretty
{
"description": {
"limits": {
"client": 1,
"dataport": 10,
"datarule": 10,
"disk": "inherit",
"dispatch": 10,
"email": 5,
"email_bucket": "inherit",
"http": 10,
"http_bucket": "inherit",
"share": 5,
"sms": 0,
"sms_bucket": 0,
"xmpp": 10,
"xmpp_bucket": "inherit"
},
"locked": false,
"meta": "",
"name": "Temporary CIK",
"public": false
}
}
'''),
('find', '''Search resource's descendants for matches.\n\nUsage:
exo find <cik> --match <matches> [--show <shows>]
Command options:
--show=<shows> Things to show on match (default: cik)
--match=<matches> List of --match x=y,z=w to match on (supported operations: ^ (not), >, <, =)
Example:
$ exo find $CIK --match "status=activated,model=$CLIENT_MODEL"
7893635162b84f78e4475c2d6383645659545344
7893635162b84f78e4475c2d6383645659545341
7893635162b84f78e4475c2d6383645659545342
$ exo find $CIK --match "model=$CLIENT_MODEL" --show="status,sn"
activated A8-UQN6L7-TUMCN0-PNZMH
activated A8-KJGJS3-WRC1RK-S9ECK
activated A8-K3CFRF-NP3NH3-2B7UA
activated A8-0KP131-C1QFXQ-4HCU4
$ exo find $CIK --match "status=activated,model=$CLIENT_MODEL" --show='basic'
{u'status': u'activated', u'type': u'client', u'modified': 1429041332, u'subscribers': 0}
{u'status': u'activated', u'type': u'client', u'modified': 1430422683, u'subscribers': 0}
{u'status': u'activated', u'type': u'client', u'modified': 1431013655, u'subscribers': 0}
{u'status': u'activated', u'type': u'client', u'modified': 1431013616, u'subscribers': 0}
To use the CIKs that are output from the find command, pipe to xargs
$ exo find $CIK --match "model=$CLIENT_MODEL" | xargs -I cik sh -c 'printf "cik\t"; exo read cik elapsed_time --time=unix'
7893635162b84f78e4475c2d6383645659545344 1431203202,4398
7893635162b84f78e4475c2d6383645659545342 1431203197,4338
To find all devices that aren't activated:
$ exo find portal --match "status^activated" --show "name,cik,status"
$ # they're all activated
The output from find is tab delimited.
'''),
('script', '''Upload a Lua script\n\nUsage:
exo [options] script <cik> [<rid>] --file=<script-file>
exo [options] script <script-file> <cik> ...
Both forms do the same thing, but --file is the recommended one.
If <rid> is omitted, the file name part of <script-file> is used
as both the alias and name of the script. This convention helps
when working with scripts in Portals, because Portals shows the
script resource's name but not its alias.
Command options:
--name=<name> script name, if different from script filename. The name
is used to identify the script, too.
--recursive operate on client and any children
--create create the script if it doesn't already exist
--follow monitor the script's debug log
--setversion=<vn> set a version number on the script meta'''),
('spark', '''Show distribution of intervals between points.\n\nUsage:
exo [options] spark <cik> [<rid>] --days=<days>
Command options:
--stddev=<num> exclude intervals more than num standard deviations from mean
{{ helpoption }}'''),
('copy', '''Make a copy of a client.\n\nUsage:
exo [options] copy <cik> <destination-cik>
Copies <cik> and all its non-client children to <destination-cik>.
Returns CIK of the copy. NOTE: copy excludes all data in dataports.
Command options:
--cikonly show unlabeled CIK by itself
{{ helpoption }}'''),
('diff', '''Show differences between two clients.\n\nUsage:
exo [options] diff <cik> <cik2>
Displays differences between <cik> and <cik2>, including all non-client
children. If clients are identical, nothing is output. For best results,
all children should have unique names.
Command options:
--full compare all info, even usage, data counts, etc.
--no-children don't compare children
{{ helpoption }}'''),
('ip', '''Get IP address of the server.\n\nUsage:
exo [options] ip'''),
('data', '''Read or write with the HTTP Data API.\n\nUsage:
exo [options] data <cik> [--write=<alias,value> ...] [--read=<alias> ...]
If only --write arguments are specified, the call is a write.
If only --read arguments are specified, the call is a read.
If both --write and --read arguments are specified, the hybrid
write/read API is used. Writes are executed before reads.'''),
('portals', '''Invalidate the Portals cache for a CIK by telling Portals
    a particular procedure was performed on the client identified by <cik>.\n\nUsage:
exo [options] portals clearcache <cik> [<procedure> ...]
<procedure> may be any of:
activate, create, deactivate, drop, map, revoke, share, unmap, update
If no <procedure> is specified, Exoline tells Portals that all of the
procedures on the list were performed on the client.
Warning: drop does not invalidate the cache correctly. Instead, use create.
'''),
('share', '''Generate a code that allows non-owners to access resources\n\nUsage:
exo [options] share <cik> <rid> [--meta=<string> [--share=<code-to-update>]]
Pass --meta to associate a metadata string with the share.
Pass --share to update metadata for an existing share.'''),
('revoke', '''Revoke a share code\n\nUsage:
exo [options] revoke <cik> --share=<code>'''),
('activate', '''Activate a share code\n\nUsage:
exo [options] activate <cik> --share=<code>
If you want to activate a *device*, use the "sn activate"
command instead'''),
('deactivate', '''Deactivate a share code\n\nUsage:
exo [options] deactivate <cik> --share=<code>'''),
('clone', '''Create a clone of a client\n\nUsage:
exo [options] clone <cik> (--rid=<rid> | --share=<code>)
Command options:
--noaliases don't copy aliases
--nohistorical don't copy time series data
--noactivate don't activate CIK of clone (client only)
The clone command copies the client resource specified by --rid or --share
into the client specified by <cik>.
For example, to clone a portals device, pass the portal CIK as <cik> and
the device RID as <rid>. The portal CIK can be found in Portals
https://<yourdomain>.exosite.com/account/portals, where it says Key: <cik>.
A device's RID can be obtained using exo lookup <device-cik>.
The clone and copy commands do similar things, but clone uses the RPC's
create (clone) functionality, which is more full featured.
https://github.com/exosite/docs/tree/master/rpc#create-clone
Use the clone command unless you need to copy a device to another portal.''')
])
# shared sections of documentation
doc_replace = {
'{{ startend }}': '''<time> can be a unix timestamp or formatted like any of these:
2011-10-23T08:00:00-07:00
10/1/2012
"2012-10-23 14:01 UTC"
"2012-10-23 14:01"
If timezone information is omitted, local timezone is assumed
If time part is omitted, it assumes 00:00:00.
To report through the present time, omit --end or pass --end=now''',
'{{ helpoption }}': ''' -h --help Show this screen.''',
}
dotpath = os.path.join(os.getcwd(), '.env')
if os.path.exists(dotpath):
    dotenv = Dotenv(dotpath)
    os.environ.update(dotenv)
plugins = []
if platform.system() != 'Windows':
    # load plugins. Use the location of the exocommon module, because this
    # file may be running as a script in some other location.
default_plugin_path = os.path.join(os.path.dirname(exocommon.__file__), 'plugins')
plugin_paths = os.getenv('EXO_PLUGIN_PATH', default_plugin_path).split(':')
for plugin_path in [i for i in plugin_paths if len(i) > 0]:
plugin_names = [os.path.basename(f)[:-3]
for f in glob.glob(plugin_path + "/*.py")
if not os.path.basename(f).startswith('_')]
for module_name in plugin_names:
try:
plugin = importlib.import_module('plugins.' + module_name)
except Exception as ex:
# TODO: only catch the not found exception, for plugin
# debugging
#print(ex)
try:
plugin = importlib.import_module('exoline.plugins.' + module_name, package='test')
except Exception as ex:
plugin = importlib.import_module('exoline.plugins.' + module_name)
# instantiate plugin
p = plugin.Plugin()
plugins.append(p)
# get documentation
command = p.command()
if isinstance(command, six.string_types):
cmd_doc[command] = plugin.__doc__
else:
for c in command:
cmd_doc[c] = p.doc(c)
else:
# plugin support for Windows executable build
try:
# spec plugin
try:
from ..exoline.plugins import spec
except:
from exoline.plugins import spec
p = spec.Plugin()
plugins.append(p)
cmd_doc[p.command()] = spec.__doc__
# transform plugin
try:
from ..exoline.plugins import transform
except:
from exoline.plugins import transform
p = transform.Plugin()
plugins.append(p)
cmd_doc[p.command()] = transform.__doc__
# provision plugin
try:
from ..exoline.plugins import provision as provisionPlugin
except:
from exoline.plugins import provision as provisionPlugin
p = provisionPlugin.Plugin()
plugins.append(p)
for c in p.command():
cmd_doc[c] = p.doc(c)
# search plugin
try:
from ..exoline.plugins import search
except:
from exoline.plugins import search
p = search.Plugin()
plugins.append(p)
cmd_doc[p.command()] = search.__doc__
# dump plugin
try:
from ..exoline.plugins import dump
except:
from exoline.plugins import dump
p = dump.Plugin()
plugins.append(p)
cmd_doc[p.command()] = dump.__doc__
# keys plugin
try:
from ..exoline.plugins import keys
except:
from exoline.plugins import keys
p = keys.Plugin()
plugins.append(p)
cmd_doc[p.command()] = keys.__doc__
# switches plugin
try:
from ..exoline.plugins import switches
except:
from exoline.plugins import switches
p = switches.Plugin()
plugins.append(p)
cmd_doc[p.command()] = switches.__doc__
# aliases plugin
try:
from ..exoline.plugins import aliases
except:
from exoline.plugins import aliases
p = aliases.Plugin()
plugins.append(p)
cmd_doc[p.command()] = aliases.__doc__
except Exception as ex:
import traceback
traceback.print_exc()
pprint(ex)
# perform substitutions on command documentation
for k in cmd_doc:
# helpoption is appended to any commands that don't already have it
if '{{ helpoption }}' not in cmd_doc[k]:
cmd_doc[k] += '\n\nCommand options:\n{{ helpoption }}'
for r in doc_replace:
cmd_doc[k] = cmd_doc[k].replace(r, doc_replace[r])
class ExoConfig:
    '''Manages the config file, grouping all related actions'''
regex_rid = re.compile("[0-9a-fA-F]{40}")
def __init__(self, configfile='~/.exoline'):
configfile = self.realConfigFile(configfile)
self.loadConfig(configfile)
def realConfigFile(self, configfile):
'''Find real path for a config file'''
# Does the file as passed exist?
cfgf = os.path.expanduser(configfile)
if os.path.exists(cfgf):
return cfgf
# Is it in the exoline folder?
cfgf = os.path.join('~/.exoline', configfile)
cfgf = os.path.expanduser(cfgf)
if os.path.exists(cfgf):
return cfgf
# Or is it a dashed file?
cfgf = '~/.exoline-' + configfile
cfgf = os.path.expanduser(cfgf)
if os.path.exists(cfgf):
return cfgf
# No such file to load.
return None
def loadConfig(self, configfile):
if configfile is None:
self.config = {}
else:
try:
with open(configfile) as f:
self.config = yaml.safe_load(f)
except IOError as ex:
self.config = {}
def lookup_shortcut(self, cik):
        '''If a CIK has client/resource parts, separate and look those up'''
if ':c' in cik:
# break into parts, then lookup each.
c,g,r = cik.partition(':c')
cik = { 'cik': self._lookup_shortcut(c),
'client_id': self._lookup_shortcut(r) }
elif ':r' in cik:
c,g,r = cik.partition(':r')
cik = { 'cik': self._lookup_shortcut(c),
'resource_id': self._lookup_shortcut(r) }
else:
# look it up, then check again for parts.
cik = self._lookup_shortcut(cik)
if ':c' in cik:
c,g,r = cik.partition(':c')
cik = {'cik': c, 'client_id': r}
elif ':r' in cik:
c,g,r = cik.partition(':r')
cik = {'cik': c, 'resource_id': r}
return cik
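    # Example of the shortcut syntax handled above (names and ids are
    # illustrative, not from any real config): 'work:c0123abc' becomes
    # {'cik': <cik for shortcut 'work'>, 'client_id': '0123abc'}, and
    # 'work:r0123abc' does the same with 'resource_id'.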
def _lookup_shortcut(self, cik):
'''Look up what was passed for cik in config file
if it doesn't look like a CIK.'''
if self.regex_rid.match(cik) is None:
if 'keys' in self.config:
if cik in self.config['keys']:
return self.config['keys'][cik].strip()
elif cik.isdigit() and int(cik) in self.config['keys']:
return self.config['keys'][int(cik)].strip()
else:
raise ExoException('No CIK shortcut {0}\n{1}'.format(
cik, '\n'.join(sorted(map(str, self.config['keys'])))))
else:
raise ExoException('Tried a CIK shortcut {0}, but found no keys'.format(cik))
else:
return cik
def mingleArguments(self, args):
'''This mixes the settings applied from the configfile, the command line and the ENV.
Command line always overrides ENV which always overrides configfile.
'''
# This ONLY works with options that take a parameter.
toMingle = ['host', 'port', 'httptimeout', 'useragent', 'portals', 'vendortoken', 'vendor']
# Precedence: ARGV then ENV then CFG
# Looks for ENV vars and pull them in, unless in ARGV
for arg in toMingle:
if args['--'+arg] is None:
env = os.getenv('EXO_'+arg.upper())
if env is not None:
args['--'+arg] = env
# Look for CFG vars and pull them in, unless in ARGV
for arg in toMingle:
if arg in self.config and args['--'+arg] is None:
args['--'+arg] = self.config[arg]
# Copy all ARGV vars to CFG for uniform lookups.
for arg in toMingle:
self.config[arg] = args['--'+arg]
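    # Precedence sketch (hypothetical values): with EXO_HOST=env.example.com
    # in the environment and host: cfg.example.com in the config file, a bare
    # invocation uses env.example.com, while --host=cli.example.com on the
    # command line overrides both.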
exoconfig = ExoConfig(os.getenv('EXO_CONFIG', DEFAULT_CONFIG))
class ExolineOnepV1(onep.OnepV1):
'''Subclass that re-adds deprecated commands needed for devices created
in Portals before the commands were deprecated.'''
def _callJsonRPC(self, cik, callrequests, returnreq=False, notimeout=False):
'''Time all calls to _callJsonRPC'''
try:
ts = time.time()
procedures = [cr['procedure'] for cr in callrequests]
r = onep.OnepV1._callJsonRPC(self, cik, callrequests, returnreq, notimeout=notimeout)
except:
raise
finally:
te = time.time()
PERF_DATA.append({'cik': cik, 'procedures': procedures, 'seconds': te-ts})
return r
def comment(self, cik, rid, visibility, comment, defer=False):
return self._call('comment', cik, [rid, visibility, comment], defer)
class ExoRPC():
'''Wrapper for pyonep RPC API.
Raises exceptions on error and provides some reasonable defaults.'''
regex_rid = re.compile("[0-9a-fA-F]{40}")
    regex_tweeid = re.compile(r"rid\.[0-9a-fA-F]{5}")
class RPCException(Exception):
pass
def __init__(self,
host=DEFAULT_HOST,
port=None,
httptimeout=60,
https=False,
verbose=True,
logrequests=False,
user_agent=None,
curldebug=False):
if port is None:
port = DEFAULT_PORT_HTTPS if https else DEFAULT_PORT
if user_agent is None:
user_agent = "Exoline {0}".format(__version__)
self.exo = ExolineOnepV1(
host=host,
port=port,
httptimeout=httptimeout,
https=https,
agent=user_agent,
reuseconnection=True,
logrequests=logrequests,
curldebug=curldebug)
def _raise_for_response(self, isok, response, call=None):
if not isok:
if call is None:
msg = str(response)
else:
msg = '{0} ({1})'.format(str(response), str(call))
raise ExoRPC.RPCException(msg)
def _raise_for_response_record(self, isok, response):
'''Undocumented RPC behavior-- if record timestamps are invalid, isok
is True but response is an array of timestamps and error
messages.'''
self._raise_for_response(isok, response)
if type(response) is list:
raise ExoRPC.RPCException(', '.join(['{0}: {1}'.format(msg, t) for msg, t in response]))
def _raise_for_deferred(self, responses):
r = []
for call, isok, response in responses:
self._raise_for_response(isok, response, call=call)
r.append(response)
return r
def mult(self, cik, commands):
return self._exomult(cik, commands)
def _check_exomult(self, auth):
if not (isinstance(auth, six.string_types) or type(auth) is dict):
raise Exception("_exomult: unexpected type for auth " + str(auth))
assert(not self.exo.has_deferred(auth))
def _exomult(self, auth, commands):
        '''Takes a list of onep commands with cik omitted, e.g.:
           [['info', {'alias': ''}], ['listing', ['dataport'], {}, {'alias': ''}]]'''
if len(commands) == 0:
return []
self._check_exomult(auth)
for c in commands:
if type(c) is not list:
raise Exception("_exomult: found invalid command " + str(c))
method = getattr(self.exo, c[0])
method(auth, *c[1:], defer=True)
assert(self.exo.has_deferred(auth))
r = self.exo.send_deferred(auth)
responses = self._raise_for_deferred(r)
return responses
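    # Hedged usage sketch (cik is a placeholder): batch an info and a
    # listing into a single RPC round trip.
    #   info, listing = rpc._exomult(cik, [
    #       ['info', {'alias': ''}, {}],
    #       ['listing', ['dataport'], {}, {'alias': ''}]])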
def _exomult_with_responses(self, auth, commands):
'''Like _exomult, but returns full responses and does not raise
an exception for individual response errors. Call this if errors
from particular calls are not fatal. General RPC errors still
raise exceptions, though.'''
if len(commands) == 0:
return []
self._check_exomult(auth)
for c in commands:
if type(c) is not list:
raise Exception("_exomult: found invalid command " + str(c))
method = getattr(self.exo, c[0])
method(auth, *c[1:], defer=True)
r = self.exo.send_deferred(auth)
        results = list(map(self._undo_pyonep_response_mangling, r))
return results
def _undo_pyonep_response_mangling(self, pyonep_response):
        '''pyonep mangles RPC responses, setting isok to status == 'ok'
        and response to either the result or the status when status is not 'ok'.
        This undoes that, restoring a plain status/result structure.'''
call, isok, r = pyonep_response
if isok:
return {'status': 'ok', 'result': r}
else:
return {'status': r}
def _exobatch(self, auth, commands, batchsize=25):
'''Performs a set of commands, breaking them into batches of at most batchsize
to prevent timeout.
auth - either a cik or an auth dict
commands - a list of commandset objects like this:
{'commands': [['info', rid, options]],
'callback': lambda(commandset, result)}
batchsize - the maximum number of commands/command objects to include
in each RPC request.
Returns a list of responses in the form {'status': !'ok'} on failure or
{'status': 'ok', 'result': result}
If any overall failures occur, an exception is raised.'''
# break calls into chunks to prevent timeout
def chunks(l, n):
'''Yield successive n-sized chunks from l.'''
for i in range(0, len(l), n):
yield l[i:i+n]
for commandchunk in chunks(commands, batchsize):
cmds = []
for commandset in commandchunk:
cmds = cmds + commandset['commands']
#sys.stderr.write('_exomult_with_responses with {0} commands.\n'.format(len(cmds)))
cmd_responses = self._exomult_with_responses(auth, cmds)
result_index = 0
# stitch the flattened result list into command sets
# and call the command set callbacks
for i, commandset in enumerate(commandchunk):
commandset_responses = []
for cmd in commandset['commands']:
commandset_responses.append(cmd_responses[result_index])
result_index += 1
if 'callback' in commandset:
commandset['callback'](commandset, commandset_responses)
yield commandset_responses
def wait(self, auth, rid, since=None, timeout=None):
'''Returns timedout, point. If timedout is True,
point is None'''
options = {}
if since is not None:
options['since'] = since
if timeout is not None:
options['timeout'] = timeout
isok, response = self.exo.wait(
auth,
rid,
options)
if not isok and response=='expire':
return True, None
else:
self._raise_for_response(isok, response)
return False, response
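    # Hedged usage sketch (cik/rid/last_ts are placeholders, and the point is
    # assumed to come back as a [timestamp, value] pair): long-poll for the
    # next point written after last_ts, giving up after 30 seconds.
    #   timedout, point = rpc.wait(cik, rid, since=last_ts + 1, timeout=30)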
def _readoptions(self, limit, sort, starttime, endtime, selection):
        options = {'limit': limit,
'sort': sort,
'selection': selection}
if starttime is not None:
options['starttime'] = int(starttime)
if endtime is not None:
options['endtime'] = int(endtime)
return options
def read(self,
cik,
rid,
limit,
sort='asc',
starttime=None,
endtime=None,
selection='all'):
options = self._readoptions(limit, sort, starttime, endtime, selection)
isok, response = self.exo.read(
cik,
rid,
options)
self._raise_for_response(isok, response)
return response
def find(self, cik, matches, shows, verbose=False):
showcik = False
if "cik" in shows:
shows = shows.replace("cik", "key")
if verbose:
print("Matching {0} and showing {1}".format(matches, shows))
matchers = {}
for matchval in matches.split(","):
data = re.findall(r"(.*?)([=<>^])(.*)", matchval)
for d in data:
if len(d) == 3:
matchers[d[0]] = (d[2], d[1])
shows = [s.strip() for s in shows.split(",")]
if verbose:
print("Showing: {0}".format(shows))
print("Matching: {0}".format(matchers))
data = self._infotree_fast(cik)
display_data = []
def compare(valueA, comp, valueB):
if verbose:
print(valueA, comp, valueB)
if comp == "^":
return valueA != valueB
elif comp == ">":
try:
return float(valueA) > float(valueB)
except:
return False
elif comp == "<":
try:
return float(valueA) < float(valueB)
except:
return False
elif comp == "=":
return valueA == valueB
return False
def match_node(node, level=0, parents=None):
if not parents:
parents = []
results = {'__matches':[], '__shows':[], "__children":[], "__output":[]}
if type(node) == type({}):
                for k, v in iteritems(node):
#print "\t"*level, k
if type(v) == type({}):
res = match_node(v, level+1, parents+[k])
results['__shows'].extend(res['__shows'])
results['__matches'].extend(res['__matches'])
results['__children'].extend(res['__children'])
results['__output'].extend(res['__output'])
if k in shows:
#print "Show: ", k, v
results['__shows'].append((k,v, level, parents))
if k in matchers:
value, comparison = matchers.get(k)
result = compare(v, comparison, value)
if result:
if verbose:
print("Match: {0} {1}".format(k, v))
results['__matches'].append( (k,v,level, parents))
if k == "meta":
try:
jv = json.loads(v)
if type(jv) == type({}):
res = match_node(jv, level)
results['__shows'].extend(res['__shows'])
results['__matches'].extend(res['__matches'])
results['__children'].extend(res['__children'])
results['__output'].extend(res['__output'])
except:
if verbose:
print("Bad meta: {0}".format(v))
children = node.get('children', [])
for child in children:
res = match_node(child, level+1, [])
match_keys = set(r[0] for r in res['__matches'])
if all(k in match_keys for k in matchers.keys()):
results['__output'].append( res['__shows'] )
if type(node) == type([]):
for l in node:
res = match_node(l, level+1)
results['__shows'].extend(res['__shows'])
results['__matches'].extend(res['__matches'])
results['__children'].extend(res['__children'])
results['__output'].extend(res['__output'])
if type(node) == type(""):
pass
return results
output = []
for d in match_node(data)['__output']:
out = []
# Loop through so we get the correct order from our input shows
for show in shows:
for e in d:
if e[0] == show:
out.append(str(e[1]))
output.append("\t".join(out))
print("\n".join(output))
def _combinereads(self, reads, sort):
'''
>>> exo = ExoRPC()
>>> exo._combinereads([[[2, 'a'], [1, 'b']]])
[[2, ['a']], [1, ['b']]]
>>> exo._combinereads([[[3, 'a'], [2, 'b']], [[3, 77], [1, 78]]])
[[3, ['a', 77]], [2, ['b', None]], [1, [None, 78]]]
>>> exo._combinereads([[[5, 'a'], [4, 'b']], [[2, 'd'], [1, 'e']]])
[[5, ['a', None]], [4, ['b', None]], [2, [None, 'd']], [1, [None, 'e']]]
>>> exo._combinereads([])
[]
'''
if len(reads) == 0:
return []
else:
combined = []
# indexes into each list indicating the next
# unprocessed value
curi = [len(l) - 1 for l in reads]
#print(reads)
# loop until we've processed every element
while curi != [-1] * len(curi):
# minimum timestamp from unprocessed entries
                timestamp = min([reads[i][ci] for i, ci in enumerate(curi) if ci != -1],
key=itemgetter(0))[0]
# list of points we haven't processed in each read result
# (or None, if all have been processed)
unprocessed = [r[i] if i > -1 else None for i, r in zip(curi, reads)]
# list of values corresponding to timestamp t
values = [None if p is None or p[0] != timestamp else p[1]
for p in unprocessed]
#print('curi {}, values {}, unprocessed: {}'.format(curi, values, unprocessed))
# add to combined results
combined.append([timestamp, values])
# update curi based on which values were processed
for i, v in enumerate(values):
if v is not None:
curi[i] -= 1
if sort == 'desc':
reverse = True
else:
reverse = False
combined.sort(key=itemgetter(0), reverse=reverse)
return combined
def readmult(self,
cik,
rids,
limit,
sort='asc',
starttime=None,
endtime=None,
selection='all',
chunksize=212,
progress=lambda count: None):
        '''Reads multiple rids and yields combined timestamped data like this:
        [12314, [1, 77, 'a']]
        [12315, [2, 78, None]]
        Values appear in the order the rids were passed; None represents
        no data in that dataport for that timestamp.'''
options = self._readoptions(limit, sort, starttime, endtime, selection)
count = [0]
def _read(cik, rids, options):
responses = self._exomult(cik, [['read', rid, options] for rid in rids])
count[0] += len(responses)
progress(count[0])
return self._combinereads(responses, options['sort'])
        if limit <= chunksize:
for r in _read(cik, rids, options):
yield r
else:
# Read chunks by limit.
maxLimit = options['limit']
if 'sort' in options and options['sort'] == 'desc':
# descending
if 'endtime' in options:
nextStart = options['endtime']
else:
nextStart = ExoUtilities.parse_ts_tuple(datetime.now().timetuple())
while True:
chunkOpt = options.copy()
chunkOpt['endtime'] = nextStart
chunkOpt['limit'] = chunksize
                    res = _read(cik, rids, chunkOpt)
if len(res) == 0:
break
maxLimit = maxLimit - len(res)
if maxLimit <= 0:
                        break
#save oldest
nextStart = res[-1][0] - 1
for r in res:
yield r
else:
# ascending
if 'starttime' in options:
nextStart = options['starttime']
else:
nextStart = 0
while True:
chunkOpt = options.copy()
chunkOpt['starttime'] = nextStart
chunkOpt['limit'] = chunksize
                    res = _read(cik, rids, chunkOpt)
if len(res) == 0:
break
maxLimit = maxLimit - len(res)
if maxLimit <= 0:
break
#save oldest
nextStart = res[-1][0] + 1
for r in res:
yield r
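    # Hedged usage sketch (cik and rids are placeholders): iterate combined
    # rows as they stream back chunk by chunk.
    #   for timestamp, values in rpc.readmult(cik, [rid_a, rid_b], limit=1000):
    #       print(timestamp, values)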
def write(self, cik, rid, value):
isok, response = self.exo.write(cik, rid, value)
self._raise_for_response(isok, response)
def record(self, cik, rid, entries):
isok, response = self.exo.record(cik, rid, entries, {})
self._raise_for_response_record(isok, response)
def create(self, cik, type, desc, name=None):
if name is not None:
desc['name'] = name
isok, response = self.exo.create(cik, type, desc)
self._raise_for_response(isok, response)
return response
def update(self, cik, rid, desc):
isok, response = self.exo.update(cik, rid, desc)
self._raise_for_response(isok, response)
return response
def create_dataport(self, cik, format, name=None):
'''Create a dataport child of cik with common defaults.
(retention count duration set to "infinity"). Returns
RID string of the created dataport.'''
desc = {"format": format,
"retention": {
"count": "infinity",
"duration": "infinity"}
}
if name is not None:
desc['name'] = name
return self.create(cik, 'dataport', desc)
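    # Hedged usage sketch (cik is a placeholder): create a float dataport with
    # infinite retention, then map an alias to it.
    #   rid = rpc.create_dataport(cik, 'float', name='temperature')
    #   rpc.map(cik, rid, 'temperature')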
def create_client(self, cik, name=None, desc=None):
'''Create a client child of cik with common defaults.
('inherit' set for all limits). Returns RID string
of the created client.'''
if desc is None:
# default description
desc = {'limits': {'client': 'inherit',
'dataport': 'inherit',
'datarule': 'inherit',
'disk': 'inherit',
'dispatch': 'inherit',
'email': 'inherit',
'email_bucket': 'inherit',
'http': 'inherit',
'http_bucket': 'inherit',
'share': 'inherit',
'sms': 'inherit',
'sms_bucket': 'inherit',
'xmpp': 'inherit',
'xmpp_bucket': 'inherit'}
}
if name is not None:
desc['name'] = name
return self.create(cik, 'client', desc)
def drop(self, cik, rids):
for rid in rids:
self.exo.drop(cik, rid, defer=True)
if self.exo.has_deferred(cik):
self._raise_for_deferred(self.exo.send_deferred(cik))
def map(self, cik, rid, alias):
'''Creates an alias for rid. '''
isok, response = self.exo.map(cik, rid, alias)
self._raise_for_response(isok, response)
return response
def unmap(self, cik, alias):
'''Removes an alias a child of calling client.'''
isok, response = self.exo.unmap(cik, alias)
self._raise_for_response(isok, response)
return response
def lookup(self, cik, alias):
isok, response = self.exo.lookup(cik, 'alias', alias)
self._raise_for_response(isok, response)
return response
def lookup_owner(self, cik, rid):
isok, response = self.exo.lookup(cik, 'owner', rid)
self._raise_for_response(isok, response)
return response
def lookup_shared(self, cik, code):
isok, response = self.exo.lookup(cik, 'shared', code)
self._raise_for_response(isok, response)
return response
def listing(self, cik, types, options={}, rid=None):
isok, response = self.exo.listing(cik, types, options=options, rid=rid)
self._raise_for_response(isok, response)
return response
def _listing_with_info(self, auth, types, info_options={}, listing_options={}, read_options=None):
'''Return a dict mapping types to dicts mapping RID to info for that
RID. E.g.:
{'client': {'<rid0>':<info0>, '<rid1>':<info1>},
'dataport': {'<rid2>':<info2>, '<rid3>':<info3>}}
info_options and read_options correspond to the options parameters
for info and read.
read_options if set to something other than None, does a read for
any datarule or dataport in the listing, passing read_options
as options. The result of the read, a list of timestamp value
pairs, is placed inside the info dict in a 'read' property.'''
assert(len(types) > 0)
listing = self._exomult(auth, [['listing', types, listing_options, {'alias': ''}]])[0]
# listing is a dictionary mapping types to lists of RIDs, like this:
# {'client': ['<rid0>', '<rid1>'], 'dataport': ['<rid2>', '<rid3>']}
# request info for each rid
# (rids is a flattened version of listing)
rids = []
restype = {}
for typ in types:
rids += listing[typ]
for rid in listing[typ]:
restype[rid] = typ
info_commands = [['info', rid, info_options] for rid in rids]
read_commands = []
readable_rids = [rid for rid in rids if restype[rid] in ['dataport', 'datarule']]
if read_options is not None:
# add reads for readable resource types
read_commands += [['read', rid, read_options] for rid in readable_rids]
responses = self._exomult(auth, info_commands + read_commands)
# From the return values make a dict of dicts
# use ordered dicts in case someone cares about order in the output
response_index = 0
listing_with_info = OrderedDict()
for typ in types:
type_response = OrderedDict()
for rid in listing[typ]:
type_response[rid] = responses[response_index]
response_index += 1
if read_options is not None and rid in readable_rids:
type_response[rid]['read'] = responses[len(info_commands) + readable_rids.index(rid)]
listing_with_info[typ] = type_response
return listing_with_info
def info(self,
cik,
rid={'alias': ''},
options={},
cikonly=False,
recursive=False,
level=None):
'''Returns info for RID as a dict.'''
if cikonly:
options = {'key': True}
if recursive:
rid = None if type(rid) is dict else rid
response = self._infotree(cik,
rid=rid,
options=options,
level=level)
else:
isok, response = self.exo.info(cik, rid, options)
self._raise_for_response(isok, response)
if cikonly:
if not 'key' in response:
raise ExoException('{0} has no CIK'.format(rid))
return response['key']
else:
return response
def flush(self, cik, rids, newerthan=None, olderthan=None):
args=[]
options = {}
if newerthan is not None: options['newerthan'] = newerthan
if olderthan is not None: options['olderthan'] = olderthan
if len(options) > 0:
args.append(options)
cmds = [['flush', rid] + args for rid in rids]
self._exomult(cik, cmds)
def usage(self, cik, rid, metrics, start, end):
for metric in metrics:
self.exo.usage(cik, rid, metric, start, end, defer=True)
responses = []
if self.exo.has_deferred(cik):
responses = self._raise_for_deferred(self.exo.send_deferred(cik))
# show report
maxlen = max([len(m) for m in metrics])
for i, r in enumerate(responses):
print("{0}:{1} {2}".format(
metrics[i], ' ' * (maxlen - len(metrics[i])), r))
def share(self, cik, rid, options):
isok, response = self.exo.share(cik,
rid,
options)
self._raise_for_response(isok, response)
return response
def revoke(self, cik, codetype, code):
isok, response = self.exo.revoke(cik, codetype, code)
self._raise_for_response(isok, response)
return response
def activate(self, cik, codetype, code):
isok, response = self.exo.activate(cik, codetype, code)
self._raise_for_response(isok, response)
return response
def deactivate(self, cik, codetype, code):
isok, response = self.exo.deactivate(cik, codetype, code)
self._raise_for_response(isok, response)
return response
def clone(self, cik, options):
isok, response = self.exo.create(cik, 'clone', options)
self._raise_for_response(isok, response)
return response
def _print_tree_line(self, line):
if sys.version_info < (3, 0):
print(line.encode('utf-8'))
else:
print(line)
def humanize_date(self, time=False):
        '''Get a datetime object or an int() Epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc.'''
now = datetime.now()
if type(time) is int:
diff = now - datetime.fromtimestamp(time)
elif isinstance(time,datetime):
diff = now - time
elif not time:
diff = now - now
return humanize.naturaltime(diff)
def _format_timestamp(self, values):
'''format tree latest point timestamp
values is up to two most recent values, e.g.:
[[<timestamp1>, <value1>], [<timestamp0>, <value0>]]'''
if values is None:
return None
if len(values) == 0:
return ''
return self.humanize_date(values[0][0])
def _format_value_with_previous(self, v, prev, maxlen):
'''Return a string representing the string v, w/maximum length
maxlen. If v is longer than maxlen, the return value
should include something that changed from previous
value prev.'''
v = repr(v)
prev = repr(prev)
if len(v) <= maxlen:
return v
sm = difflib.SequenceMatcher(None, prev, v)
def get_nonmatching_blocks(mb):
lasti = 0
out = []
for m in mb:
if m.b - lasti > 0:
out.append({'i': lasti, 'size': m.b - lasti})
lasti = m.b + m.size
return out
# get the blocks (index, size) of v that changed from prev
mb = list(sm.get_matching_blocks())
nonmatching_blocks = get_nonmatching_blocks(mb)
# get the biggest non-matching block
#bnb = nmb.sorted(nonmatching_blocks, key=lambda(b): b['size'])[-1]
def widen_block(block, s, left=0, right=0):
'''block is a location in s and size in this form:
{'i': <index>, 'size': <size>}. Return block b
such that the b is up to widen_by wider on the
left and right while keeping it within the bounds
of s. block must be already a subset of s. '''
out = copy.copy(block)
for j in range(left):
# try to add to left
if out['i'] > 0:
out['i'] -= 1
out['size'] += 1
for j in range(right):
# try to add to right
if out['i'] + out['size'] < len(s):
out['size'] += 1
return out
# number of characters of context to show on either side of a difference
context = 5
startblock = widen_block(nonmatching_blocks[0], v, left=context, right=maxlen)
s = ''
if startblock['i'] > 0:
s += '...'
s += v[startblock['i']:startblock['i']+startblock['size']]
return s[:maxlen] + ('...' if startblock['i'] + len(s) < maxlen else '')
def _format_values(self, values, maxlen=20):
'''format latest value for output with tree
values is up to two most recent values, e.g.:
[[<timestamp1>, <value1>], [<timestamp0>, <value0>]]'''
if values is None:
return None
if len(values) == 0:
return ''
v = values[0][1]
if type(v) is float or type(v) is int:
return str(v)
elif type(v) is dict:
return str(v)
else:
latest = v.replace('\n', r'\n').replace('\r', r'\r')
out = (latest[:maxlen - 3] + '...') if len(latest) > maxlen else latest
return out
# this is not better
#prev = values[1][1] if len(values) > 1 else ''
#v = values[0][1]
#return self._format_value_with_previous(v, prev, maxlen)
def _print_node(self, rid, info, aliases, cli_args, spacer, islast, maxlen=None, values=None):
twee = cli_args['<command>'] == 'twee'
typ = info['basic']['type']
if typ == 'client':
id = 'cik: ' + info['key']
else:
id = 'rid: ' + rid
name = info['description']['name']
try:
# Units are a portals only thing
            # u'comments': [[u'public', u'{"unit":"Fahrenheit"}']]
units = json.loads(info['comments'][0][1])['unit']
if len(units.strip()) == 0:
units = 'none'
except:
units = 'none'
# Sometimes aliases is a dict, sometimes a list. TODO: Why?
# Translate it into a list.
if type(aliases) is dict:
aliases = aliases.get(rid, [])
elif aliases is None:
aliases = []
opt = OrderedDict()
def add_opt(o, label, value):
if o is True or (o in cli_args and cli_args[o] is True):
opt[label] = value
try:
# show portals metadata if present
# http://developers.exosite.com/display/POR/Developing+for+Portals
meta = json.loads(info['description']['meta'])
device = meta['device']
if device['type'] == 'vendor':
add_opt(True, 'vendor', device['vendor'])
add_opt(True, 'model', device['model'])
add_opt(True, 'sn', device['sn'])
except:
pass
has_alias = aliases is not None and len(aliases) > 0
if has_alias:
if type(aliases) is list:
add_opt(True, 'aliases', json.dumps(aliases))
else:
add_opt(True, 'aliases', aliases)
# show RID for clients with no alias, or if --verbose was passed
ridopt = False
if typ == 'client':
if has_alias:
ridopt = '--verbose'
else:
ridopt = True
add_opt(ridopt, 'rid', rid)
add_opt('--verbose', 'unit', units)
if 'listing_option' in info and info['listing_option'] == 'activated':
add_opt(True, 'share', True)
        if maxlen is None:
maxlen = {}
maxlen['type'] = len(typ)
maxlen['name'] = len(name)
maxlen['format'] = 0 if 'format' not in info['description'] else len(info['description']['format'])
try:
terminal_width, terminal_height = exocommon.get_terminal_size()
except:
# Default to 80 chars
terminal_width = 80
val = self._format_values(values, terminal_width)
timestamp = self._format_timestamp(values)
add_opt(values is not None, 'value', None if (val is None or timestamp is None) else val + '/' + timestamp)
if twee:
# colors, of course
class bcolors:
SPACER = '' if cli_args['--nocolor'] else '\033[0m'
NAME = '' if cli_args['--nocolor'] else '\033[0m'
TYPE = '' if cli_args['--nocolor'] else '\033[35m'
ID = '' if cli_args['--nocolor'] else '\033[32m'
VALUE = '' if cli_args['--nocolor'] else '\033[33m'
TIMESTAMP = '' if cli_args['--nocolor'] else '\033[34m'
PINK = '' if cli_args['--nocolor'] else '\033[35m'
MODEL = '' if cli_args['--nocolor'] else '\033[36m'
ENDC = '' if cli_args['--nocolor'] else '\033[0m'
# the goal here is to make the line short to provide more room for the value
            # so if there's an alias, just use that, since it's short
            # if no alias, then 'rid.' plus the first five characters of the RID
            # if multiple aliases, then the first alias
if typ == 'client':
if cli_args['--rids']:
tweeid = bcolors.SPACER + 'rid: ' + bcolors.ID + rid
else:
tweeid = bcolors.SPACER + 'cik: ' + bcolors.ID + id[5:]
else:
if cli_args['--rids']:
tweeid = bcolors.SPACER + 'rid: ' + bcolors.ID + rid
else:
if aliases is not None and len(aliases) > 0:
tweeid = aliases[0]
else:
tweeid = 'rid.' + rid[:5]
displayname = ((name + bcolors.SPACER + ' ') if len(name) > 0 else ' ')
displaytype = {'dataport': 'dp', 'client': 'cl', 'datarule': 'dr', 'dispatch': 'ds'}[typ]
if 'format' in info['description']:
displaytype += '.' + {'binary': 'b', 'string': 's', 'float': 'f', 'integer': 'i'}[info['description']['format']]
else:
displaytype = ' ' + displaytype
displaymodel = ''
if 'sn' in opt and 'model' in opt:
displaymodel = ' (' + opt['model'] + '#' + opt['sn'] + ')'
if val:
twee_line = "".join([spacer, displayname, ' '*( maxlen['name']+1- len(name)), displaytype, tweeid, (' (share)' if 'listing_option' in info and info['listing_option'] == 'activated' else ''),
('' if typ == 'client' else ': '), ('' if timestamp is None or len(timestamp) == 0 else ' (' + timestamp + ')'), displaymodel])
val_size = len(val)
displayed_chars = len(twee_line)
allowed_size = terminal_width-displayed_chars
if val_size > allowed_size:
allowed_size -= 3
val = val[:allowed_size] + "..."
self._print_tree_line(
bcolors.SPACER +
spacer +
bcolors.NAME +
displayname +
' ' * (maxlen['name'] + 1 - len(name)) +
bcolors.TYPE +
displaytype + ' ' +
bcolors.ID +
tweeid +
bcolors.SPACER +
(' (share)' if 'listing_option' in info and info['listing_option'] == 'activated' else '') +
('' if typ == 'client' else ': ') +
bcolors.VALUE +
('' if val is None else val) +
bcolors.TIMESTAMP +
('' if timestamp is None or len(timestamp) == 0 else ' (' + timestamp + ')') +
bcolors.MODEL +
displaymodel +
bcolors.ENDC)
else:
# standard tree
if 'format' in info['description']:
fmt = info['description']['format']
desc = fmt + ' ' * (maxlen['format'] + 1 - len(fmt))
desc += typ + ' ' * (maxlen['type'] + 1 - len(typ))
desc += id
else:
desc = typ + ' ' * (maxlen['type'] + 1 - len(typ))
desc += id
self._print_tree_line('{0}{1}{2} {3} {4}'.format(
spacer,
name,
' ' * (maxlen['name'] + 1 - len(name)),
desc,
'' if len(opt) == 0 else '({0})'.format(', '.join(
['{0}: {1}'.format(k, v) for k, v in iteritems(opt)]))))
def tree(self, auth, aliases=None, cli_args={}, spacer='', level=0, info_options={}):
'''Print a tree of entities in OneP'''
max_level = int(cli_args['--level'])
# print root node
isroot = len(spacer) == 0
if isinstance(auth, six.string_types):
cik = auth
elif type(auth) is dict:
cik = auth['cik']
rid = auth['client_id']
else:
raise ExoException('Unexpected auth type ' + str(type(auth)))
if isroot:
# usage and counts are slow, so omit them if we don't need them
exclude = ['usage', 'counts']
info_options = self.make_info_options(exclude=exclude)
rid, info = self._exomult(auth,
[['lookup', 'alias', ''],
['info', {'alias': ''}, info_options]])
# info doesn't contain key
info['key'] = cik
aliases = info['aliases']
root_aliases = 'see parent'
self._print_node(rid,
info,
root_aliases,
cli_args,
spacer,
True)
if max_level == 0:
return
level += 1
types = ['dataport', 'datarule', 'dispatch', 'client']
try:
should_read = '--values' in cli_args and cli_args['--values']
#_listing_with_info() output looks like this
# {'client': {'<rid0>':<info0>, '<rid1>':<info1>},
# 'dataport': {'<rid2>':<info2>}}
listing = self._listing_with_info(auth,
types=types,
info_options=info_options,
listing_options={'owned': True},
read_options={'limit': 1} if should_read else None)
# mark as not shares
for t in types:
for info in listing[t].values():
info['listing_option'] = 'owned'
# add annotations for shares
listing_shares = self._listing_with_info(auth,
types=types,
info_options=info_options,
listing_options={'activated': True},
read_options={'limit': 1} if should_read else None)
# mark as shares and add to listing
for t in types:
for rid, info in listing_shares[t].items():
info['listing_option'] = 'activated'
# skip any shares that are in the same listing
if rid not in listing[t]:
listing[t][rid] = info
except pyonep.exceptions.OnePlatformException:
self._print_tree_line(
spacer +
" └─listing for {0} failed. info['basic']['status'] is \
probably not valid.".format(cik))
except ExoRPC.RPCException as ex:
if str(ex).startswith('locked ('):
self._print_tree_line(
spacer +
" └─{0} is locked".format(json.dumps(auth)))
else:
self._print_tree_line(
spacer +
" └─RPC error for {0}: {1}".format(json.dumps(auth), ex))
else:
# calculate the maximum length of various things for all children,
# so we can make things line up in the output.
maxlen = {}
namelengths = [len(l[1]['description']['name']) for typ in types for l in iteritems(listing[typ])]
maxlen['name'] = 0 if len(namelengths) == 0 else max(namelengths)
typelengths = [len(l[1]['basic']['type']) for typ in types for l in iteritems(listing[typ])]
maxlen['type'] = 0 if len(typelengths) == 0 else max(typelengths)
formatlengths = [len(l[1]['description']['format'])
for typ in types
for l in iteritems(listing[typ])
if 'format' in l[1]['description']]
maxlen['format'] = 0 if len(formatlengths) == 0 else max(formatlengths)
# print everything
for t_idx, t in enumerate(types):
typelisting = OrderedDict(sorted(iteritems(listing[t]), key=lambda x: x[1]['description']['name'].lower()))
islast_nonempty_type = (t_idx == len(types) - 1) or (all(len(listing[typ]) == 0 for typ in types[t_idx + 1:]))
for rid_idx, rid in enumerate(typelisting):
info = typelisting[rid]
islastoftype = rid_idx == len(typelisting) - 1
islast = islast_nonempty_type and islastoftype
if platform.system() != 'Windows':
if islast:
child_spacer = spacer + ' '
own_spacer = spacer + ' └─'
else:
child_spacer = spacer + ' │ '
own_spacer = spacer + ' ├─'
else:
# Windows executable
if islast:
child_spacer = spacer + ' '
own_spacer = spacer + ' +-'
else:
child_spacer = spacer + ' | '
own_spacer = spacer + ' +-'
if t == 'client':
self._print_node(rid, info, aliases, cli_args, own_spacer, islast, maxlen)
if max_level == -1 or level < max_level:
self.tree({'cik': cik, 'client_id': rid}, info['aliases'], cli_args, child_spacer, level=level + 1, info_options=info_options)
else:
self._print_node(rid, info, aliases, cli_args, own_spacer, islast, maxlen, values=info['read'] if 'read' in info else None)
def drop_all_children(self, cik):
isok, listing = self.exo.listing(
cik,
types=['client', 'dataport', 'datarule', 'dispatch'],
options={},
rid={'alias': ''})
self._raise_for_response(isok, listing)
rids = itertools.chain(*[listing[t] for t in listing.keys()])
self._exomult(cik, [['drop', rid] for rid in rids])
def _lookup_rid_by_name(self, cik, name, types=['datarule']):
'''Look up RID by name. We use name rather than alias to identify
scripts created in Portals because it only displays names to the
user, not aliases. Note that if multiple scripts have the same
name the first one in the listing is returned.'''
found_rid = None
listing = self._listing_with_info(cik, types)
for typ in listing:
for rid in listing[typ]:
if listing[typ][rid]['description']['name'] == name:
# return first match
return rid
return None
def _upload_script(self, cik, name, content, rid=None, alias=None, version='0.0.0'):
'''Upload a lua script, either creating one or updating the existing one'''
desc = {
'format': 'string',
'name': name,
'preprocess': [],
'rule': {
'script': content
},
'visibility': 'parent',
'retention': {
'count': 'infinity',
'duration': 'infinity'
}
}
meta = {
'version': version,
'uploads': 1,
'githash': ''
}
# if `git rev-parse HEAD` works, include that.
try:
githash = os.popen("git rev-parse HEAD").read()
meta['githash'] = githash
except:
pass
desc['meta'] = json.dumps(meta)
if rid is None:
success, rid = self.exo.create(cik, 'datarule', desc)
if success:
print("New script RID: {0}".format(rid))
else:
#print('cik: {0} desc: {1}'.format(cik, json.dumps(desc)))
raise ExoException("Error creating datarule: {0}".format(rid))
if alias is None:
alias = name
success, rid = self.exo.map(cik, rid, alias)
if success:
print("Aliased script to: {0}".format(alias))
else:
raise ExoException("Error aliasing script")
else:
isok, olddesc = self.exo.info(cik, rid)
if isok:
try:
oldmetajs = olddesc['description']['meta']
oldmeta = json.loads(oldmetajs)
uploads = oldmeta['uploads']
uploads = uploads + 1
meta['uploads'] = uploads
desc['meta'] = json.dumps(meta)
except:
pass
# if none of that works, go with the default above.
isok, response = self.exo.update(cik, rid, desc)
if isok:
print ("Updated script RID: {0}".format(rid))
else:
raise ExoException("Error updating datarule: {0}".format(response))
def cik_recursive(self, cik, fn):
'''Run fn on cik and all its client children'''
fn(cik)
lwi = self._listing_with_info(cik,
['client'],
info_options={'key': True})
        # {'client': {'<rid0>': <info0>, '<rid1>': <info1>}}
for rid in lwi['client']:
self.cik_recursive(lwi['client'][rid]['key'], fn)
def upload_script_content(self,
ciks,
content,
name,
recursive=False,
create=False,
filterfn=lambda script: script,
rid=None,
version='0.0.0'):
for cik in ciks:
def up(cik, rid):
if rid is not None:
alias = None
if create:
# when creating, if <rid> is passed it must be an alias
# to use instead of name
if type(rid) is not dict:
raise ExoException('<rid> must be an alias when passing --create')
alias = rid['alias']
rid = None
self._upload_script(cik, name, content, rid=rid, alias=alias, version=version)
else:
rid = self._lookup_rid_by_name(cik, name)
if rid is not None or create:
self._upload_script(cik, name, content, rid=rid, version=version)
else:
# TODO: move this to spec plugin
print("Skipping CIK: {0} -- {1} not found".format(cik, name))
if not create:
print('Pass --create to create it')
if recursive:
self.cik_recursive(cik, lambda cik: up(cik, rid))
else:
up(cik, rid)
def upload_script(self,
ciks,
filename,
name=None,
recursive=False,
create=False,
filterfn=lambda script: script,
rid=None,
follow=False,
version='0.0.0'):
try:
f = open(filename)
except IOError:
raise ExoException('Error opening file {0}.'.format(filename))
else:
with f:
content = filterfn(f.read())
if len(content) > SCRIPT_LIMIT_BYTES:
sys.stderr.write(
'WARNING: script is {0} bytes over the size limit of {1} bytes.\n'.format(
len(content) - SCRIPT_LIMIT_BYTES, SCRIPT_LIMIT_BYTES))
if name is None:
# if no name is specified, use the file name as a name
name = os.path.basename(filename)
def upl():
self.upload_script_content(
ciks,
content,
name=name,
recursive=recursive,
create=create,
filterfn=filterfn,
rid=rid,
version=version)
if follow:
if len(ciks) > 1:
raise Exception('following more than one CIK is not supported')
lines = followSeries(
self,
ciks[0],
{'alias': name},
timeout_milliseconds=3000,
printFirst=True)
options = {'format': 'human'}
writer = serieswriter.SeriesWriter(['timestamp', 'log'], options)
last_modified = 0
last_activity = 0
last_status = ''
uploaded = False
nocolor = platform.system() == 'Windows'
class colors:
PINK = '' if nocolor else '\033[35m'
CYAN = '' if nocolor else '\033[36m'
YELLOW = '' if nocolor else '\033[33m'
GREEN = '' if nocolor else '\033[32m'
RED = '' if nocolor else '\033[31m'
GRAY = '' if nocolor else '\033[1;30m'
ENDC = '' if nocolor else '\033[0m'
def status_color(status):
return colors.RED if status == 'error' else colors.GREEN
# loop forever
for timestamp, vals in lines:
towrite = []
info = self._exomult(ciks[0], [
['info', {'alias': name}, {'basic': True, 'description': True}]])[0]
code = info['description']['rule']['script']
if timestamp is not None and vals is not None:
# received a point
# break up lines
if uploaded:
lines = vals[0].split('\n')
for line in lines:
# Parse lua errors and show the line with the error
# [string "..."]:6: global namespace is reserved
match = re.match('\[string ".*\.\.\."\]:(\d+): (.*)', line)
if match is None:
towrite.append([timestamp, [line], 'debug'])
else:
err_line = int(match.groups()[0])
code_lines = code.splitlines()
code_excerpt = ''
# previous line
if err_line > 1:
code_excerpt += (' ' * 11 + str(err_line - 1) + ' ' + code_lines[err_line - 2] + '\n')
# line with the error
code_excerpt += ' ' * 11 + str(err_line) + ' ' + code_lines[err_line - 1] + '\n'
# next line
if err_line < len(code_lines):
code_excerpt += (' ' * 11 + str(err_line + 1) + ' ' + code_lines[err_line])
err_msg = match.groups()[1]
towrite.append([timestamp, [colors.RED + 'ERROR: ' + err_msg + colors.ENDC + ' (line ' + str(err_line) + ')\n' + code_excerpt], 'debug'])
modified = info['basic']['modified']
if modified != last_modified:
if uploaded:
towrite.append([modified, [colors.PINK + 'script modified' + colors.ENDC], '00 modified'])
last_modified = modified
'''# sort by timestamp to keep the code simple
activities = sorted(info['basic']['activity'], key=lambda x: x[0])
for act_ts, act_list in activities:
if act_ts > last_activity:
if uploaded:
msg = ', '.join(reversed([status_color(s) + s + colors.ENDC for s in act_list])) + colors.ENDC
towrite.append(
[act_ts, [msg], '01 activity'])
last_activity = act_ts'''
status = info['basic']['status']
if status != last_status:
# this doesn't have a timestamp, so use the highest timestamp
towrite.append([
None,
['[' + colors.GRAY + '.' * 8 + colors.ENDC + '] ' +
status_color(status) + status + colors.ENDC],
'02 status'])
last_status = status
# upload *after* getting info for the first time,
# for more consistent output
if not uploaded:
# warn if script is unchanged
if code == content:
sys.stderr.write(colors.PINK + 'WARNING' + colors.ENDC + ': script code matches what is on the server, so script will NOT be restarted\n')
upl()
uploaded = True
# sort by timestamp, then tag
# (sorting by tag puts modified before status, which is more common)
towrite = sorted(towrite, key=lambda x: (x[0], x[2]))
for ts, vals, tag in towrite:
if ts is not None:
writer.write(ts, vals)
else:
print(vals[0])
#c = exocommon.getch()
#print('char: ' + c)
else:
upl()
def lookup_rid(self, cik, cik_to_find):
isok, listing = self.exo.listing(cik, types=['client'], options={}, rid={'alias': ''})
self._raise_for_response(isok, listing)
for rid in listing['client']:
self.exo.info(cik, rid, {'key': True}, defer=True)
if self.exo.has_deferred(cik):
responses = self.exo.send_deferred(cik)
for idx, r in enumerate(responses):
call, isok, response = r
self._raise_for_response(isok, response)
if response['key'] == cik_to_find:
return listing['client'][idx]
def record_backdate(self, cik, rid, interval_seconds, values):
        '''Record a list of values as if they happened in the past,
        interval_seconds apart. For example, if values
            ['a', 'b', 'c']
        are passed in with interval 10, they're recorded as
            [[-10, 'c'], [-20, 'b'], [-30, 'a']].
        interval_seconds must be positive.'''
timestamp = -interval_seconds
tvalues = []
values.reverse()
for v in values:
tvalues.append([timestamp, v])
timestamp -= interval_seconds
return self.record(cik, rid, tvalues)
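    # A minimal usage sketch (hypothetical CIK and alias): with interval 10
    # and values ['a', 'b', 'c'], the loop above reverses the list and counts
    # back from -10, so this call records [[-10, 'c'], [-20, 'b'], [-30, 'a']]
    # relative to the current time:
    #
    #   er.record_backdate('<cik>', {'alias': 'log'}, 10, ['a', 'b', 'c'])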
def _create_from_infotree(self, parentcik, infotree):
'''Create a copy of infotree under parentcik'''
info_to_copy = infotree['info']
typ = info_to_copy['basic']['type']
rid = self.create(parentcik, typ, info_to_copy['description'])
if 'comments' in info_to_copy and len(info_to_copy['comments']) > 0:
commands = [['comment', rid, c[0], c[1]] for c in info_to_copy['comments']]
self._exomult(parentcik, commands)
if typ == 'client':
# look up new CIK
cik = self.info(parentcik, rid)['key']
children = infotree['info']['children']
aliases_to_create = {}
for child in children:
newrid, _ = self._create_from_infotree(cik, child)
if child['rid'] in infotree['info']['aliases']:
aliases_to_create[newrid] = infotree['info']['aliases'][child['rid']]
# add aliases in one request
self._exomult(
cik,
list(itertools.chain(*[[['map', r, alias]
for alias in aliases_to_create[r]]
for r in aliases_to_create])))
return rid, cik
else:
return rid, None
    def _counttypes(self, infotree, counts=None):
        '''Return a dictionary with the count of each type of resource in the
        tree. For example, {'client': 2, 'dataport': 1, 'dispatch': 1}'''
        # default to None to avoid the shared mutable default argument
        # pitfall, which would accumulate counts across separate calls
        if counts is None:
            counts = defaultdict(int)
        info = infotree['info']
        counts[info['basic']['type']] += 1
if 'children' in info:
for child in info['children']:
counts = self._counttypes(child, counts=counts)
return counts
def copy(self, cik, destcik, infotree=None):
'''Make a copy of cik and its non-client children to destcik and
return the cik of the copy.'''
# read in the whole client to copy at once
if infotree is None:
def check_for_unsupported(rid, info):
desc = info['description']
if 'subscribe' in desc and desc['subscribe'] is not None and len(desc['subscribe']) > 0:
                    raise ExoException('''Copy does not yet support resources that use the "subscribe" feature, as RID {0} in the source client does.\nIf you're just copying a device into the same portal consider using the clone command.'''.format(rid))
return rid
destcik = exoconfig.lookup_shortcut(destcik)
infotree = self._infotree(cik, options={}, nodeidfn=check_for_unsupported)
# check counts
counts = self._counttypes(infotree)
destinfo = self.info(destcik, options={'description': True, 'counts': True})
noroom = ''
for typ in counts:
destlimit = destinfo['description']['limits'][typ]
destcount = destinfo['counts'][typ]
needs = counts[typ]
# TODO: need a way to check if limit is set to 'inherit'
if type(destlimit) is int and destlimit - destcount < needs:
noroom = noroom + 'Thing to copy has {0} {1}{4}, parent has limit of {3} (and is using {2}).\n'.format(
needs, typ, destcount, destlimit, 's' if needs > 1 else '')
if len(noroom) > 0:
raise ExoException('Copy would violate parent limits:\n{0}'.format(noroom))
cprid, cpcik = self._create_from_infotree(destcik, infotree)
return cprid, cpcik
def _remove(self, dct, keypaths):
'''Remove keypaths from dictionary.
>>> ex = ExoRPC()
>>> ex._remove({'a': {'b': {'c': 1}}}, [['a', 'b', 'c']])
{'a': {'b': {}}}
>>> ex._remove({'a': {'b': {'q': 1}}}, [['a', 'b', 'c']])
{'a': {'b': {'q': 1}}}
>>> ex._remove({}, [['a'], ['b'], ['c']])
{}
>>> ex._remove({'q': 'a'}, [['a'], ['b']])
{'q': 'a'}
'''
for kp in keypaths:
x = dct
for i, k in enumerate(kp):
if k in x:
if i == len(kp) - 1:
del x[k]
else:
x = x[k]
else:
break
return dct
def _differences(self, dict1, dict2):
differ = difflib.Differ()
s1 = json.dumps(dict1, indent=2, sort_keys=True).splitlines(1)
s2 = json.dumps(dict2, indent=2, sort_keys=True).splitlines(1)
return list(differ.compare(s1, s2))
#def _infotree(self,
# auth,
# rid=None,
# restype='client',
# resinfo=None,
# nodeidfn=lambda rid,
# info: rid,
# options={},
# level=None,
# raiseExceptions=True,
# errorfn=lambda auth, msg: None):
def _infotree_fast(self,
auth,
nodeidfn=lambda rid, info: rid,
options={},
level=None,
listing_options={},
visit=lambda tree, level, parentRID: None):
'''Faster version of _infotree that uses the new listing and breadth
first traversal to reduce the number of RPC calls.'''
rootnode = {'tree': {'type': 'client'}, 'par': None}
#if rid is not None:
# # nodeidfn here?
# rootnode['tree']['rid'] = rid
level = 0
gen = [rootnode]
nextgen = []
def callback(commandset, result):
# add the commandset results to the node
node = commandset['node']
tree = node['tree']
info_idx = 0
listing_idx = 1
lookup_idx = 2
# set node info
if result[info_idx]['status'] != 'ok':
tree['info'] = {'error': result[info_idx]}
else:
tree['info'] = result[info_idx]['result']
# lookup is only done for the root node when rid is not known
if len(result) == lookup_idx + 1:
# this would not be OK
assert(result[lookup_idx]['status'] == 'ok')
tree['rid'] = result[lookup_idx]['result']
tree['rid'] = nodeidfn(tree['rid'], tree['info'])
# set node listing
if tree['type'] == 'client':
if result[listing_idx]['status'] != 'ok':
tree['children'] = {'error': result[listing_idx]}
else:
children = []
r = result[listing_idx]['result']
for typ in r.keys():
for rid in r[typ]:
children.append({'rid': rid, 'type': typ})
tree['children'] = children
def commandset(node):
rid = node['tree']['rid'] if 'rid' in node['tree'] else {'alias': ''}
types = ['client', 'dataport', 'datarule', 'dispatch']
commands = [
['info', rid, options]
]
if node['tree']['type'] == 'client':
commands.append(['listing', types, listing_options, rid])
if 'rid' not in node['tree']:
commands.append(['lookup', 'aliased', ''])
return {'node': node,
'commands': commands,
'callback': callback}
while len(gen) > 0:
# set up commandsets with callbacks that modify the nodes in gen
commands = map(commandset, gen)
# get info, listing, etc. for each node at this level
results = self._exobatch(auth, commands)
results = list(results)
# now the nodes are populated, so build up the next generation
for node in gen:
                visit(node['tree'], level, node['par'])
if 'children' in node['tree']:
for child_tree in node['tree']['children']:
nextgen.append({'tree': child_tree, 'par': node['tree']['rid']})
gen = nextgen
nextgen = []
level += 1
return rootnode['tree']
def _infotree(self,
auth,
rid=None,
restype='client',
resinfo=None,
nodeidfn=lambda rid,
info: rid,
options={},
level=None,
raiseExceptions=True,
errorfn=lambda auth, msg: None):
'''Get all info for a cik and its children in a nested dict.
The basic unit is {'rid': '<rid>', 'info': <info-with-children>},
where <info-with-children> is just the info object for that node
        with the addition of a 'children' key, which is a list containing
        more nodes. Here's an example return value:
        {'rid': '<rid 0>', 'info': {'description': ...,
                                    'basic': ...,
                                    'children': [
            {'rid': '<rid 1>', 'info': {'description': ...,
                                        'basic': ...,
                                        'children': [
                {'rid': '<rid 2>', 'info': {'description': ...,
                                            'basic': ...,
                                            'children': []}}]}},
            {'rid': '<rid 3>', 'info': {'description': ...,
                                        'basic': ...,
                                        'children': []}}]}}
As it's building this nested dict, it calls nodeidfn with the rid and info
(w/o children) for each node.
'''
try:
# handle passing cik for auth
if isinstance(auth, string_types):
auth = {'cik': auth}
types = ['dataport', 'datarule', 'dispatch', 'client']
listing = {}
norid = rid is None
if norid:
rid, resinfo = self._exomult(auth, [
['lookup', 'aliased', ''],
['info', {'alias': ''}, options]])
else:
if resinfo is None:
resinfo = self._exomult(auth, [['info', rid, options]])[0]
myid = nodeidfn(rid, resinfo)
if level is not None and level <= 0:
return {'rid': myid, 'info': resinfo}
if restype == 'client':
if not norid:
# key is only available to owner (not the resource itself)
auth = {
'cik': auth['cik'],
'client_id': rid
}
try:
listing = self._exomult(auth, [['listing', types, {}, {'alias': ''}]])[0]
except ExoRPC.RPCException as e:
listing = dict([(t, []) for t in types])
errorfn(auth, str(e))
rids = [rid for rid in list(itertools.chain.from_iterable([listing[t] for t in types]))]
# break info calls into chunks to prevent timeout
chunksize = 20
def chunks(l, n):
'''Yield successive n-sized chunks from l.'''
for i in range(0, len(l), n):
yield l[i:i+n]
infos = []
for ridchunk in chunks(rids, chunksize):
infos += self._exomult(auth, [['info', rid, options] for rid in ridchunk])
else:
listing = []
resinfo['children'] = []
infoIndex = 0
for typ in types:
if typ in listing:
ridlist = listing[typ]
for childrid in ridlist:
tr = self._infotree(auth,
rid=childrid,
restype=typ,
resinfo=infos[infoIndex],
nodeidfn=nodeidfn,
options=options,
level=None if level is None else level-1,
raiseExceptions=raiseExceptions,
errorfn=errorfn)
infoIndex += 1
resinfo['children'].append(tr)
resinfo['children'].sort(key=lambda x: x['rid'] if 'rid' in x else '')
return {'rid': myid, 'info': resinfo}
except Exception as ex:
if raiseExceptions:
six.reraise(Exception, ex)
else:
return {'exception': ex, 'auth': auth, 'rid': rid}
def _difffilter(self, difflines):
d = difflines
# replace differing rid children lines with a single <<rid>>
ridline = '^[+-](.*").*\.[a-f0-9]{40}(".*)\n'
d = re.sub(ridline * 2, r' \1<<RID>>\2\n', d, flags=re.MULTILINE)
# replace differing rid alias lines with a single <<rid>> placeholder
a = '(.*")[a-f0-9]{40}("\: \[)\n'
plusa = '^\+' + a
minusa = '^\-' + a
d = re.sub(plusa + minusa, r' \1<<RID>>\2\n', d, flags=re.MULTILINE)
d = re.sub(minusa + plusa, r' \1<<RID>>\2\n', d, flags=re.MULTILINE)
# replace differing cik lines with a single <<cik>> placeholder
a = '(.*"key"\: ")[a-f0-9]{40}(",.*)\n'
plusa = '^\+' + a
minusa = '^\-' + a
d = re.sub(plusa + minusa, r' \1<<CIK>>\2\n', d, flags=re.MULTILINE)
d = re.sub(minusa + plusa, r' \1<<CIK>>\2\n', d, flags=re.MULTILINE)
return d
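    # Sketch of the filter's effect (RIDs shortened for illustration): a pair
    # of diff lines that differ only in a trailing 40-hex RID, e.g.
    #   -    "child.aaaa...": [
    #   +    "child.bbbb...": [
    # collapses to a single neutral line containing <<RID>>, so cosmetic RID
    # differences between two otherwise identical trees don't show up.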
def diff(self, cik1, cik2, full=False, nochildren=False):
'''Show differences between two ciks.'''
cik2 = exoconfig.lookup_shortcut(cik2)
# list of info "keypaths" to not include in comparison
# only the last item in the list is removed. E.g. for a
# keypath of ['counts', 'disk'], only the 'disk' key is
# ignored.
ignore = [['usage'],
['counts', 'disk'],
['counts', 'email'],
['counts', 'http'],
['counts', 'share'],
['counts', 'sms'],
['counts', 'xmpp'],
['basic', 'status'],
['basic', 'modified'],
['basic', 'activity'],
['data']]
if nochildren:
info1 = self.info(cik1)
info1 = self._remove(info1, ignore)
info2 = self.info(cik2)
info2 = self._remove(info2, ignore)
else:
def name_prepend(rid, info):
if not full:
self._remove(info, ignore)
# prepend the name so that node names tend to sort (and so
# compare well)
return info['description']['name'] + '.' + rid
info1 = self._infotree(cik1, nodeidfn=name_prepend, options={})
info2 = self._infotree(cik2, nodeidfn=name_prepend, options={})
if info1 == info2:
return None
else:
differences = self._differences(info1, info2)
differences = ''.join(differences)
if not full:
# pass through a filter that removes
# differences that we don't care about
# (e.g. different RIDs)
differences = self._difffilter(differences)
                if all(line.startswith(' ') for line in differences.split('\n') if line):
return None
return differences
def make_info_options(self, include=[], exclude=[]):
'''Create options for the info command based on included
and excluded keys.'''
options = {}
# TODO: this is a workaround. The RPC API returns empty list if any
# keys are set to false. So, the workaround is to include all keys
# except for the excluded ones. This has the undesirable
# side-effect of producing "<key>": null in the results, so it would be
# better for this to be done in the API.
#
#for key in exclude:
# options[key] = False
if len(exclude) > 0:
options.update(dict([(k, True) for k in ['aliases',
'basic',
'counts',
'description',
'key',
'shares',
'subscribers',
'tags',
'usage']
if k not in exclude]))
else:
for key in include:
options[key] = True
return options
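    # For example, make_info_options(exclude=['usage', 'counts']) returns a
    # dict with the remaining info keys ('aliases', 'basic', 'description',
    # etc.) all set to True, applying the workaround described above.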
class ExoData():
'''Implements the Data Interface API
https://github.com/exosite/docs/tree/master/data'''
def __init__(self, url='http://m2.exosite.com'):
self.url = url
def raise_for_status(self, r):
try:
r.raise_for_status()
except Exception as ex:
raise ExoException(str(ex))
def read(self, cik, aliases):
headers = {'X-Exosite-CIK': cik,
'Accept': 'application/x-www-form-urlencoded; charset=utf-8'}
url = self.url + '/onep:v1/stack/alias?' + '&'.join(aliases)
r = requests.get(url, headers=headers)
self.raise_for_status(r)
return r.text
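    # Minimal usage sketch (hypothetical CIK and aliases): the aliases are
    # joined into the query string, so this issues GET .../alias?temp&humidity
    #
    #   ed = ExoData()
    #   ed.read('<cik>', ['temp', 'humidity'])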
def write(self, cik, alias_values):
headers = {'X-Exosite-CIK': cik,
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'}
url = self.url + '/onep:v1/stack/alias'
r = requests.post(url, headers=headers, data=alias_values)
self.raise_for_status(r)
return r.text
def writeread(self, cik, alias_values, aliases):
headers = {'X-Exosite-CIK': cik,
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Accept': 'application/x-www-form-urlencoded; charset=utf-8'}
url = self.url + '/onep:v1/stack/alias?' + '&'.join(aliases)
r = requests.post(url, headers=headers, data=alias_values)
self.raise_for_status(r)
return r.text
def ip(self):
r = requests.get(self.url + '/ip')
r.raise_for_status()
return r.text
class ExoPortals():
'''Provides access to the Portals APIs'''
# list of procedures that may be included in invalidation data
writeprocs = ['activate',
'create',
'deactivate',
'drop',
'map',
'revoke',
'share',
'unmap',
'update']
def __init__(self, portalsserver='https://portals.exosite.com'):
self.portalsserver = portalsserver
def invalidate(self, data):
# This API is documented here:
# https://i.exosite.com/display/DEVPORTALS/Portals+Cache+Invalidation+API
data = json.dumps(data)
#print('invalidating with ' + data)
try:
response = requests.post(self.portalsserver + '/api/portals/v1/cache',
data=data)
except Exception as ex:
raise ExoException('Failed to connect to ' + self.portalsserver)
try:
            response.raise_for_status()
        except Exception as ex:
            raise ExoException('Bad status from Portals cache invalidate API call: ' + str(ex))
class ExoUtilities():
@classmethod
def parse_ts(cls, s):
return None if s is None else ExoUtilities.parse_ts_tuple(parser.parse(s).timetuple())
@classmethod
def parse_ts_tuple(cls, t):
return int(time.mktime(t))
@classmethod
def get_startend(cls, args):
'''Get start and end timestamps based on standard arguments'''
start = args.get('--start', None)
end = args.get('--end', None)
def is_ts(s):
return s is not None and re.match('^-?[0-9]+$', s) is not None
if is_ts(start):
start = int(start)
if start < 0:
start = ExoUtilities.parse_ts_tuple((datetime.now() + timedelta(seconds=start)).timetuple())
else:
start = ExoUtilities.parse_ts(start)
if end == 'now':
end = None
elif is_ts(end):
end = int(end)
if end < 0:
end = ExoUtilities.parse_ts_tuple((datetime.now() + timedelta(seconds=end)).timetuple())
else:
end = ExoUtilities.parse_ts(end)
return start, end
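    # For example, '--start -3600' is treated as a relative offset (one hour
    # before now), '--start 2015-01-01' is parsed as an absolute date, and
    # '--end now' leaves the end timestamp open.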
@classmethod
def format_time(cls, sec):
'''Formats a time interval for human consumption'''
intervals = [[60 * 60 * 24, 'd'],
[60 * 60, 'h'],
[60, 'm']]
text = ""
for s, label in intervals:
if sec >= s and sec // s > 0:
text = "{0} {1}{2}".format(text, sec // s, label)
sec -= s * (sec // s)
if sec > 0:
text += " {0}s".format(sec)
return text.strip()
@classmethod
def handleSystemExit(cls, ex):
# Handle SystemExit per https://docs.python.org/2/library/exceptions.html#exceptions.SystemExit
if ex.code is None:
return 0
elif isinstance(ex.code, six.string_types):
sys.stderr.write(ex.code)
return 1
        elif isinstance(ex.code, int):
            return ex.code
else:
sys.stderr.write('Unexpected exitcode: {0}\n'.format(ex.code))
return 1
def spark(numbers, empty_val=None):
"""Generate a text based sparkline graph from a list of numbers (ints or
floats).
When value is empty_val, show no bar.
https://github.com/1stvamp/py-sparkblocks
Based on:
https://github.com/holman/spark
and:
http://www.datadrivenconsulting.com/2010/06/twitter-sparkline-generator/
"""
out = []
min_value = min(numbers)
max_value = max(numbers)
value_scale = max_value - min_value
for number in numbers:
if number == empty_val:
out.append(" ")
else:
if (number - min_value) != 0 and value_scale != 0:
scaled_value = (number - min_value) / value_scale
else:
scaled_value = 0
num = math.floor(min([6, (scaled_value * 7)]))
# Hack because 9604 and 9608 aren't vertically aligned the same as
# other block elements
if num == 3:
if (scaled_value * 7) < 3.5:
num = 2
else:
num = 4
elif num == 7:
num = 6
if six.PY3:
unichrfn = chr
else:
unichrfn = unichr
out.append(unichrfn(int(9601 + num)))
return ''.join(out)
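# A quick illustration (arbitrary input values): each number is scaled to one
# of seven block characters between U+2581 and U+2587, e.g.
#
#   >>> spark([1, 5, 22, 13, 53])
#   '▁▁▃▂▇'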
def meanstdv(l):
    '''Calculate mean and sample standard deviation'''
    mean = sum(l) / float(len(l))
    std = math.sqrt(sum([(x - mean)**2 for x in l]) / (len(l) - 1))
    return mean, std
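# For example, meanstdv([2, 4, 4, 4, 5, 5, 7, 9]) returns a mean of 5.0 and a
# sample standard deviation of sqrt(32/7), approximately 2.138 (note the n-1
# denominator).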
def show_intervals(er, cik, rid, start, end, limit, numstd=None):
# show a distribution of intervals between data
data = er.read(cik,
rid,
limit,
sort='desc',
starttime=start,
endtime=end)
if len(data) == 0:
return
intervals = [data[i - 1][0] - data[i][0] for i in range(1, len(data))]
intervals = sorted(intervals)
if numstd is not None:
# only include data within numstd standard deviations
# of the mean
mean, std = meanstdv(intervals)
intervals = [x for x in intervals
if mean - numstd * std <= x
and x <= mean + numstd * std]
if len(intervals) == 0:
return
num_bins = 60
min_t, max_t = min(intervals), max(intervals)
bin_size = float(max_t - min_t) / num_bins * 1.0
bins = []
for i in range(num_bins):
bin_min = min_t + i * bin_size
bin_max = min_t + (i + 1) * bin_size
if i != 0:
critfn = lambda x: bin_min < x and x <= bin_max
else:
critfn = lambda x: bin_min <= x and x <= bin_max
#bins.append((bin_min, bin_max, float(
# sum(map(critfn, intervals)))))
if six.PY3:
mapfn = map
else:
mapfn = itertools.imap
bins.append(float(sum(mapfn(critfn, intervals))))
print(spark(bins, empty_val=0))
min_label = ExoUtilities.format_time(min_t)
max_label = ExoUtilities.format_time(max_t)
sys.stdout.write(min_label)
sys.stdout.write(' ' * (num_bins - len(min_label) - len(max_label)))
sys.stdout.write(max_label + '\n')
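# Sketch of the binning above: with min_t=0, max_t=60 and 60 bins, bin i
# counts intervals in (i, i+1] seconds, except bin 0 which is closed on both
# ends ([0, 1]) so the minimum interval itself is included.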
# return a generator that reads rid forever and yields either:
# A. timestamp, value pair (on data)
# B. None, None (on timeout)
def followSeries(er, cik, rid, timeout_milliseconds, printFirst=True):
# do an initial read
results = er.readmult(
cik,
[rid],
limit=1,
selection='all',
sort='desc')
# --follow doesn't want the result to be an iterator
results = list(results)
last_t, last_v = 0, None
if len(results) > 0 and printFirst:
last_t, last_v = results[0]
yield(last_t, last_v)
while True:
timedout, point = er.wait(
cik,
rid,
since=last_t + 1,
timeout=timeout_milliseconds)
if not timedout:
last_t, last_v = point
yield(last_t, [last_v])
# flush output for piping this output to other programs
sys.stdout.flush()
else:
yield(None, None)
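# Minimal usage sketch (hypothetical CIK): print points for a dataport alias
# as they arrive, with a 3-second long-poll timeout; (None, None) marks a
# timeout with no new data.
#
#   for ts, vals in followSeries(er, '<cik>', {'alias': 'temp'}, 3000):
#       if ts is not None:
#           print(ts, vals)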
def read_cmd(er, cik, rids, args):
'''Read command'''
if len(rids) == 0:
# if only a CIK was passed, include all dataports and datarules
# by default.
listing = er.listing(cik, ['dataport', 'datarule'], options={}, rid={'alias': ''})
rids = listing['dataport'] + listing['datarule']
aliases = er.info(cik, options={'aliases': True})['aliases']
# look up aliases for column headers
cmdline_rids = [aliases[rid][0] if rid in aliases else rid for rid in rids]
# in this case default to showing headers
headertype = 'rid'
else:
cmdline_rids = args['<rid>']
headertype = args['--header']
limit = args['--limit']
limit = 1 if limit is None else int(limit)
# time range
start, end = ExoUtilities.get_startend(args)
timeformat = args['--timeformat']
if headertype == 'name':
# look up names of rids
infos = er._exomult(cik,
[['info', r, {'description': True}] for r in rids])
headers = ['timestamp'] + [i['description']['name'] for i in infos]
else:
# use whatever headers were passed at the command line (RIDs or
# aliases)
headers = ['timestamp'] + [str(r) for r in cmdline_rids]
fmt = args['--format']
tz = args['--tz']
options = {
'format': fmt,
'timeformat': timeformat,
'tz': tz
}
lw = serieswriter.SeriesWriter(headers, options)
if headertype is not None:
# write headers
lw.write_headers()
timeout_milliseconds = 3000
if args['--follow']:
if len(rids) > 1:
raise ExoException('--follow does not support reading from multiple rids')
lines = followSeries(
er,
cik,
rids[0],
timeout_milliseconds=timeout_milliseconds,
printFirst=True)
# goes forever
for ts, v in lines:
if ts is not None and v is not None:
lw.write(ts, v)
else:
chunksize = int(args['--chunksize'])
result = er.readmult(cik,
rids,
sort=args['--sort'],
starttime=start,
endtime=end,
limit=limit,
selection=args['--selection'],
chunksize=chunksize)
for t, v in result:
lw.write(t, v)
def plain_print(arg):
print(arg)
def pretty_print(arg):
print(json.dumps(arg, sort_keys=True, indent=4, separators=(',', ': ')))
def handle_args(cmd, args):
use_https = False if args['--http'] is True else True
# command-specific http timeout defaults
if args['--httptimeout'] == '60':
if args['<command>'] == 'copy':
            args['--httptimeout'] = '480'
port = args['--port']
if port is None:
port = DEFAULT_PORT_HTTPS if use_https else DEFAULT_PORT
er = ExoRPC(host=args['--host'],
port=port,
https=use_https,
httptimeout=args['--httptimeout'],
logrequests=args['--clearcache'],
user_agent=args['--useragent'],
curldebug=args['--curl'])
pop = provision.Provision(
host=args['--host'],
manage_by_cik=False,
port=port,
verbose=True,
https=use_https,
raise_api_exceptions=True,
curldebug=args['--curl'])
if cmd in ['ip', 'data']:
if args['--https'] is True or args['--port'] is not None or args['--debughttp'] is True or args['--curl'] is True:
# TODO: support these
raise ExoException('--https, --port, --debughttp, and --curl are not supported for ip and data commands.')
ed = ExoData(url='http://' + args['--host'])
if cmd in ['portals'] or args['--clearcache']:
portals = ExoPortals(args['--portals'])
if '<cik>' in args and args['<cik>'] is not None:
cik = args['<cik>']
if type(cik) is list:
cik = [exoconfig.lookup_shortcut(c) for c in cik]
else:
cik = exoconfig.lookup_shortcut(cik)
else:
# for data ip command
cik = None
def rid_or_alias(rid, cik=None):
'''Translate what was passed for <rid> to an alias object if
it doesn't look like a RID.'''
if er.regex_rid.match(rid) is None:
if er.regex_tweeid.match(rid) is None:
return {'alias': rid}
else:
# look up full RID based on short version
tweetype, ridfrag = rid.split('.')
listing = er.listing(cik, ['client', 'dataport', 'datarule', 'dispatch'], options={}, rid={'alias': ''})
candidates = []
for typ in listing:
for fullrid in listing[typ]:
if fullrid.startswith(ridfrag):
candidates.append(fullrid)
if len(candidates) == 1:
return candidates[0]
elif len(candidates) > 1:
raise ExoException('More than one RID starts with ' + ridfrag + '. Better use the full RID.')
else:
raise ExoException('No RID found that starts with ' + ridfrag + '. Is it an immediate child of ' + cik + '?')
else:
return rid
rids = []
if '<rid>' in args:
if type(args['<rid>']) is list:
for rid in args['<rid>']:
rids.append(rid_or_alias(rid, cik))
else:
if args['<rid>'] is None:
rids.append({"alias": ""})
else:
rids.append(rid_or_alias(args['<rid>'], cik))
if args.get('--pretty', False):
pr = pretty_print
else:
pr = plain_print
try:
if cmd == 'read':
read_cmd(er, cik, rids, args)
elif cmd == 'write':
if args['-']:
val = sys.stdin.read()
# remove extra newline
if val[-1] == '\n':
val = val[:-1]
er.write(cik, rids[0], val)
else:
er.write(cik, rids[0], args['--value'])
elif cmd == 'record':
interval = args['--interval']
if interval is None:
# split timestamp, value
if not args['--value']:
headers = ['timestamp'] + [x for x in range(0,len(rids))]
if sys.version_info < (3, 0):
dr = csv.DictReader(sys.stdin, headers, encoding='utf-8')
else:
dr = csv.DictReader(sys.stdin, headers)
rows = list(dr)
chunkcnt=0
entries=[[] for x in range(0,len(rids))]
for row in rows:
s = row['timestamp']
if s is not None and re.match('^[-+]?[0-9]+$', s) is not None:
ts = int(s)
else:
ts = ExoUtilities.parse_ts(s)
for column in range(0,len(rids)):
value = row[column]
# TODO: How to deal with an empty cell should be a cmdline option.
# skip it, or record a default number or empty string?
if value is not None:
entries[column].append([ts, value])
chunkcnt += 1
if chunkcnt > int(args['--chunksize']):
for idx in range(0,len(rids)):
er.record(cik, rids[idx], entries[idx])
chunkcnt = 0
entries=[[] for x in range(0,len(rids))]
for idx in range(0,len(rids)):
if len(entries[idx]) > 0:
er.record(cik, rids[idx], entries[idx])
else:
entries = []
has_errors = False
tvalues = args['--value']
reentry = re.compile('(-?\d+),(.*)')
for tv in tvalues:
match = reentry.match(tv)
if match is None:
try:
t, v = tv.split(',')
if t is not None and re.match('^[-+]?[0-9]+$', t) is not None:
ts = int(t)
else:
ts = ExoUtilities.parse_ts(t)
entries.append([ts, v])
except Exception:
sys.stderr.write(
'Line not in <timestamp>,<value> format: {0}'.format(tv))
has_errors = True
else:
g = match.groups()
s = g[0]
if s is not None and re.match('^[-+]?[0-9]+$', s) is not None:
ts = int(s)
else:
ts = ExoUtilities.parse_ts(s)
entries.append([ts, g[1]])
if has_errors or len(entries) == 0:
raise ExoException("Problems with input.")
else:
er.record(cik, rids[0], entries)
else:
if args['-']:
values = [v.strip() for v in sys.stdin.readlines()]
else:
values = args['--value']
interval = int(interval)
if interval <= 0:
raise ExoException("--interval must be positive")
er.record_backdate(cik, rids[0], interval, values)
elif cmd == 'create':
typ = args['--type']
ridonly = args['--ridonly']
cikonly = args['--cikonly']
if ridonly and cikonly:
raise ExoException('--ridonly and --cikonly are mutually exclusive')
if args['-']:
s = sys.stdin.read()
try:
desc = json.loads(s)
except Exception as ex:
raise ExoException(ex)
rid = er.create(cik,
type=typ,
desc=desc,
name=args['--name'])
elif typ == 'client':
rid = er.create_client(cik,
name=args['--name'])
elif typ == 'dataport':
rid = er.create_dataport(cik,
args['--format'],
name=args['--name'])
else:
raise ExoException('No defaults for {0}.'.format(args['--type']))
if ridonly:
pr(rid)
elif cikonly:
print(er.info(cik, rid, cikonly=True))
else:
pr('rid: {0}'.format(rid))
if typ == 'client':
# for convenience, look up the cik
print('cik: {0}'.format(er.info(cik, rid, cikonly=True)))
if args['--alias'] is not None:
er.map(cik, rid, args['--alias'])
if not ridonly:
print("alias: {0}".format(args['--alias']))
elif cmd == 'update':
s = sys.stdin.read()
try:
desc = json.loads(s)
except Exception as ex:
raise ExoException(ex)
pr(er.update(cik, rids[0], desc=desc))
elif cmd == 'map':
er.map(cik, rids[0], args['<alias>'])
elif cmd == 'unmap':
er.unmap(cik, args['<alias>'])
elif cmd == 'lookup':
# look up by cik or alias
cik_to_find = args['--cik']
owner_of = args['--owner-of']
share = args['--share']
if cik_to_find is not None:
cik_to_find = exoconfig.lookup_shortcut(cik_to_find)
rid = er.lookup_rid(cik, cik_to_find)
if rid is not None:
pr(rid)
elif owner_of is not None:
rid = er.lookup_owner(cik, owner_of)
if rid is not None:
pr(rid)
elif share is not None:
rid = er.lookup_shared(cik, share)
if rid is not None:
pr(rid)
else:
alias = args['<alias>']
if alias is None:
alias = ""
pr(er.lookup(cik, alias))
elif cmd == 'drop':
if args['--all-children']:
er.drop_all_children(cik)
else:
if len(rids) == 0:
raise ExoException("<rid> is required")
er.drop(cik, rids)
elif cmd == 'listing':
types = args['--types'].split(',')
options = {}
tags = args['--tagged']
if tags is not None:
options['tagged'] = tags.split(',')
filters = args['--filters']
if filters is not None:
for f in filters.split(','):
options[f] = True
listing = er.listing(cik, types, options=options, rid=rids[0])
if args['--plain']:
for t in types:
for rid in listing[t]:
print(rid)
else:
pr(json.dumps(listing))
elif cmd == 'whee':
tree = er._infotree_fast(cik, options={'basic': True})
pr(json.dumps(tree))
elif cmd == 'info':
include = args['--include']
include = [] if include is None else [key.strip()
for key in include.split(',')]
exclude = args['--exclude']
exclude = [] if exclude is None else [key.strip()
for key in exclude.split(',')]
options = er.make_info_options(include, exclude)
level = args['--level']
level = None if level is None or args['--recursive'] is False else int(level)
info = er.info(cik,
rids[0],
options=options,
cikonly=args['--cikonly'],
recursive=args['--recursive'],
level=level)
if args['--pretty']:
pr(info)
else:
if args['--cikonly']:
pr(info)
else:
# output json
pr(json.dumps(info))
elif cmd == 'flush':
start, end = ExoUtilities.get_startend(args)
er.flush(cik, rids, newerthan=start, olderthan=end)
elif cmd == 'usage':
allmetrics = ['client',
'dataport',
'datarule',
'dispatch',
'email',
'http',
'sms',
'xmpp']
start, end = ExoUtilities.get_startend(args)
er.usage(cik, rids[0], allmetrics, start, end)
# special commands
elif cmd == 'tree':
er.tree(cik, cli_args=args)
elif cmd == 'find':
shows = args['--show'] if args['--show'] else "cik"
er.find(cik, args['--match'], shows)
elif cmd == 'twee':
args['--values'] = True
if platform.system() == 'Windows':
args['--nocolor'] = True
er.tree(cik, cli_args=args)
elif cmd == 'script':
# cik is a list of ciks
if args['--file']:
filename = args['--file']
else:
filename = args['<script-file>']
rid = None if args['<rid>'] is None else rids[0]
            svers = None if '--setversion' not in args else args['--setversion']
er.upload_script(cik,
filename,
name=args['--name'],
recursive=args['--recursive'],
create=args['--create'],
rid=rid,
follow=args['--follow'],
version=svers)
elif cmd == 'spark':
days = int(args['--days'])
end = ExoUtilities.parse_ts_tuple(datetime.now().timetuple())
start = ExoUtilities.parse_ts_tuple((datetime.now() - timedelta(days=days)).timetuple())
numstd = args['--stddev']
numstd = int(numstd) if numstd is not None else None
show_intervals(er, cik, rids[0], start, end, limit=1000000, numstd=numstd)
elif cmd == 'copy':
destcik = args['<destination-cik>']
newrid, newcik = er.copy(cik, destcik)
if args['--cikonly']:
pr(newcik)
else:
pr('cik: ' + newcik)
elif cmd == 'diff':
if sys.version_info < (2, 7):
raise ExoException('diff command requires Python 2.7 or above')
diffs = er.diff(cik,
args['<cik2>'],
full=args['--full'],
nochildren=args['--no-children'])
if diffs is not None:
print(diffs)
elif cmd == 'ip':
pr(ed.ip())
elif cmd == 'data':
reads = args['--read']
writes = args['--write']
def get_alias_values(writes):
# TODO: support values with commas
alias_values = []
re_assign = re.compile('(.*),(.*)')
for w in writes:
if w.count(',') > 1:
raise ExoException('Values with commas are not supported.')
m = re_assign.match(w)
if m is None or len(m.groups()) != 2:
raise ExoException("Bad alias assignment format")
alias_values.append(m.groups())
return alias_values
if len(reads) > 0 and len(writes) > 0:
alias_values = get_alias_values(writes)
print(ed.writeread(cik, alias_values, reads))
elif len(reads) > 0:
print(ed.read(cik, reads))
elif len(writes) > 0:
alias_values = get_alias_values(writes)
ed.write(cik, alias_values)
elif cmd == 'portals':
procedures = args['<procedure>']
if len(procedures) == 0:
procedures = ExoPortals.writeprocs
else:
unknownprocs = []
for p in procedures:
if p not in ExoPortals.writeprocs:
unknownprocs.append(p)
if len(unknownprocs) > 0:
raise ExoException(
'Unknown procedure(s) {0}'.format(','.join(unknownprocs)))
data = {'auth': {'cik': cik},
'calls':[{'procedure': p, 'arguments': [], 'id': i}
for i, p in enumerate(procedures)]}
portals.invalidate(data)
elif cmd == 'share':
options = {}
share = args['--share']
if share is not None:
options['share'] = share
meta = args['--meta']
if meta is not None:
options['meta'] = meta
pr(er.share(cik,
rids[0],
options))
elif cmd == 'revoke':
if args['--share'] is not None:
typ = 'share'
code = args['--share']
else:
typ = 'client'
code = args['--client']
pr(er.revoke(cik, typ, code))
elif cmd == 'activate':
if args['--share'] is not None:
typ = 'share'
code = args['--share']
else:
typ = 'client'
code = args['--client']
er.activate(cik, typ, code)
elif cmd == 'deactivate':
if args['--share'] is not None:
typ = 'share'
code = args['--share']
else:
typ = 'client'
code = args['--client']
er.deactivate(cik, typ, code)
elif cmd == 'clone':
options = {}
if args['--share'] is not None:
options['code'] = args['--share']
if args['--rid'] is not None:
rid_to_clone = args['--rid']
if er.regex_rid.match(rid_to_clone) is None:
# try to look up RID for an alias
alias = rid_to_clone
rid_to_clone = er.lookup(cik, alias)
options['rid'] = rid_to_clone
options['noaliases'] = args['--noaliases']
options['nohistorical'] = args['--nohistorical']
rid = er.clone(cik, options)
pr('rid: {0}'.format(rid))
info = er.info(cik, rid, {'basic': True, 'key': True})
typ = info['basic']['type']
copycik = info['key']
if typ == 'client':
if not args['--noactivate']:
er.activate(cik, 'client', copycik)
# for convenience, look up the cik
pr('cik: {0}'.format(copycik))
else:
# search plugins
handled = False
exitcode = 1
for plugin in plugins:
if cmd in plugin.command():
options = {
'cik': cik,
'rids': rids,
'rpc': er,
'provision': pop,
'exception': ExoException,
'provision-exception': pyonep.exceptions.ProvisionException,
'utils': ExoUtilities,
'config': exoconfig
}
try:
options['data'] = ed
except NameError:
# no problem
pass
if cmd == "switches":
options['doc'] = cmd_doc
exitcode = plugin.run(cmd, args, options)
handled = True
break
if not handled:
raise ExoException("Command not handled")
return exitcode
finally:
if args['--clearcache']:
for req in er.exo.loggedrequests():
procs = [c['procedure'] for c in req['calls']]
# if operation will invalidate the Portals cache...
if len([p for p in procs if p in ExoPortals.writeprocs]) > 0:
portals.invalidate(req)
class DiscreetFilter(object):
'''Filter stdin/stdout to hide anything that looks like
an RID'''
def __init__(self, out):
self.out = out
# match the two halves of an RID/CIK
self.ridre = re.compile('([a-fA-F0-9]{20})([a-fA-F0-9]{20})')
def write(self, message):
# hide the second half
if sys.version_info < (3, 0):
message = message.decode('utf-8')
s = self.ridre.sub('\g<1>01234567890123456789', message)
if sys.version_info < (3, 0):
s = s.encode('utf-8')
self.out.write(s)
def flush(self):
self.out.flush()
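# Illustration (shortened here): a 40-hex RID/CIK such as
# 'aaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbb' is written out as
# 'aaaaaaaaaaaaaaaaaaaa01234567890123456789' -- the first half is kept so
# output stays distinguishable, the second half is masked.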
def cmd(argv=None, stdin=None, stdout=None, stderr=None):
'''Wrap the command line interface. Globally redirects args
and io so that the application can be tested externally.'''
# globally redirect args and io
if argv is not None:
sys.argv = argv
if stdin is not None:
sys.stdin = stdin
if stderr is not None:
sys.stderr = stderr
if stdout is not None:
sys.stdout = stdout
# add the first line of the detailed documentation to
# the exo --help output. Some lines span newlines.
max_cmd_length = max(len(cmd) for cmd in cmd_doc)
command_list = ''
for cmd in cmd_doc:
lines = cmd_doc[cmd].split('\n\n')[0].split('\n')
command_list += ' ' + cmd + ' ' * (max_cmd_length - len(cmd)) + ' ' + lines[0] + '\n'
for line in lines[1:]:
command_list += ' ' * max_cmd_length + line + '\n'
doc = __doc__.replace('{{ command_list }}', command_list)
try:
args = docopt(
doc,
version="Exosite Command Line {0}".format(__version__),
options_first=True)
except SystemExit as ex:
return ExoUtilities.handleSystemExit(ex)
global exoconfig
if args['--config'] is None:
args['--config'] = os.environ.get('EXO_CONFIG', '~/.exoline')
exoconfig = ExoConfig(args['--config'])
# get command args
cmd = args['<command>']
argv = [cmd] + args['<args>']
if cmd in cmd_doc:
# if doc expects yet another command, pass options_first=True
options_first = True if re.search(
'^Commands:$',
cmd_doc[cmd],
flags=re.MULTILINE) else False
try:
args_cmd = docopt(cmd_doc[cmd], argv=argv, options_first=options_first)
except SystemExit as ex:
return ExoUtilities.handleSystemExit(ex)
else:
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits(word):
# courtesy of:
# http://norvig.com/spell-correct.html
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
inserts = [a + c + b for a, b in splits for c in alphabet]
return set(deletes + transposes + replaces + inserts)
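        # For instance, edits('tere') contains 'tree' (one transposition), so
        # a typo like 'exo tere' would suggest the 'tree' command.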
e = edits(cmd)
# make a list of valid commands the user could have meant
alts = [w for w in cmd_doc.keys() if w in e]
alt_msg = ''
if 0 < len(alts) and len(alts) < 4:
alt_msg = 'Did you mean {0}? '.format(' or '.join(alts))
print('Unknown command {0}. {1}Try "exo --help"'.format(cmd, alt_msg))
return 1
# merge command-specific arguments into general arguments
args.update(args_cmd)
# turn on stdout/stderr filtering
if args['--discreet']:
sys.stdout = DiscreetFilter(sys.stdout)
sys.stderr = DiscreetFilter(sys.stderr)
# configure logging
logging.basicConfig(stream=sys.stderr)
logging.getLogger("pyonep.onep").setLevel(logging.ERROR)
if args['--debughttp'] or args['--curl']:
logging.getLogger("pyonep.onep").setLevel(logging.DEBUG)
logging.getLogger("pyonep.provision").setLevel(logging.DEBUG)
# substitute environment variables
if args['--host'] is None:
args['--host'] = os.environ.get('EXO_HOST', DEFAULT_HOST)
if args['--port'] is None:
args['--port'] = os.environ.get('EXO_PORT', None)
exoconfig.mingleArguments(args)
try:
exitcode = handle_args(cmd, args)
if exitcode is None:
return 0
else:
return exitcode
except ExoException as ex:
# command line tool threw an exception on purpose
sys.stderr.write("Command line error: {0}\r\n".format(ex))
return 1
except ExoRPC.RPCException as ex:
# pyonep library call signaled an error in return values
sys.stderr.write("One Platform error: {0}\r\n".format(ex))
return 1
except pyonep.exceptions.ProvisionException as ex:
# if the body of the provision response is something other
# than a repeat of the status and reason, show it
showBody = str(ex).strip() != "HTTP/1.1 {0} {1}".format(
ex.response.status(),
ex.response.reason())
sys.stderr.write(
"One Platform provisioning exception: {0}{1}\r\n".format(
ex,
' (' + str(ex.response.body).strip() + ')' if showBody else ''))
return 1
except pyonep.exceptions.OnePlatformException as ex:
# pyonep library call threw an exception on purpose
sys.stderr.write("One Platform exception: {0}\r\n".format(ex))
return 1
except pyonep.exceptions.JsonRPCRequestException as ex:
sys.stderr.write("JSON RPC Request Exception: {0}\r\n".format(ex))
return 1
except pyonep.exceptions.JsonRPCResponseException as ex:
sys.stderr.write("JSON RPC Response Exception: {0}\r\n".format(ex))
return 1
except KeyboardInterrupt:
if args['--debug']:
raise
return 0
class CmdResult():
def __init__(self, exitcode, stdout, stderr):
self.exitcode = exitcode
self.stdout = stdout
self.stderr = stderr
def run(argv, stdin=None):
'''Runs an exoline command, translating stdin from
string and stdout to string. Returns a CmdResult.'''
old = {'stdin': sys.stdin, 'stdout': sys.stdout, 'stderr': sys.stderr}
try:
if stdin is None:
stdin = sys.stdin
elif isinstance(stdin, six.string_types):
sio = StringIO()
if six.PY3:
sio.write(stdin)
else:
sio.write(stdin.encode('utf-8'))
sio.seek(0)
stdin = sio
stdout = StringIO()
stderr = StringIO()
exitcode = cmd(argv=argv, stdin=stdin, stdout=stdout, stderr=stderr)
stdout.seek(0)
stdout = stdout.read().strip() # strip to get rid of leading newline
stderr.seek(0)
stderr = stderr.read().strip()
finally:
# restore stdout, stderr, stdin
sys.stdin = old['stdin']
sys.stdout = old['stdout']
sys.stderr = old['stderr']
return CmdResult(exitcode, stdout, stderr)
if __name__ == '__main__':
sys.exit(cmd(sys.argv))
# vim: set ai et sw=4 ts=4 :
|
danslimmon/exoline
|
exoline/exo.py
|
Python
|
bsd-3-clause
| 141,519
|
[
"VisIt"
] |
7e55ffe3a39b77d419a71ae28f0a455ffb2711033dc34b8355c52be54c9fc054
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# A python script to remove outdated dependency files and associated objects. It operates from any
# directory within the herd. It cleans out the moose directory and any herd animals included in the
# Makefile.
#
# The script requires one input parameter, the root directory of MOOSE (e.g., ~/projects/trunk)
# Load the necessary packages
import os, sys, re
## A function for locating dependency herd animals
# Reads "Makefile" and searches for *.mk includes to build a list of directories to search
# for dependencies
#
# @param root_dir The MOOSE root directory (e.g., ~/projects/trunk/)
#
# @return A list of directories to clean
#
def findDepDirs(root_dir):
    # Test that the supplied MOOSE root directory is valid
    if not os.path.isdir(root_dir):
        print("ERROR: Supplied MOOSE root directory (" + root_dir + ") does not exist.")
        sys.exit(1)
# Open the Make file
file = open(os.getcwd() + "/Makefile")
# Storage for dependency directories, always include the moose directory
if os.environ.get('MOOSE_DEV') == "true":
dep_dirs = [os.path.join(root_dir, "devel", "moose")]
else:
dep_dirs = [os.path.join(root_dir, "moose")]
# Loop through the file and look for include statements for .mk files
for line in file:
x = re.search('include .*\/(\w+)\.mk' , line)
# Store all *.mk includes other than the moose.mk and build.mk
if x and x.group(1) != "build" and x.group(1) != "moose":
dep_dirs.append(os.path.join(root_dir, x.group(1)))
# Close file, return the list of directories to clean
file.close()
return dep_dirs
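# For instance (hypothetical Makefile content), a line such as
#   include $(APPLICATION_DIR)/chigger.mk
# matches the pattern above and appends <root_dir>/chigger to the list, while
# moose.mk and build.mk includes are intentionally skipped.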
## Cleans outdated dependencies for supplied directory
# Recursively searches the directory for *.d files, then searches the *.d file for headers (*.h).
# If the header does not exist the *.d and associated object file is removed.
#
# @param cur_dir Full path to the directory that needs to be cleaned
#
def cleanDepDirs(cur_dir):
# Walk through, recursively, the sub-directories and build a list of directories to search
dep_files = []
for path, dirs, files in os.walk(cur_dir):
# Define the top-level directory
top = re.search('.*\/(.*)', path)
top = top.group(1)
# If the top-level is a dot directory or if it is the "doc" directory, do nothing,
# else search for *.d files and append the list
if not top.startswith(".") and not path.endswith("doc"):
for f in files:
if f.endswith(".d"):
dep_files.append(os.path.join(path, f))
# Loop through all dep files
for cur_dep_file in dep_files:
# Open the *.d file, extract the lines, and close the file. The file should
# be closed before continuing since it may get deleted below
file = open(cur_dep_file)
dep_file_lines = file.read().splitlines()
file.close()
    # Loop through each line in the file; if it is a header, check that it exists.
    # If it does not exist, delete the *.d file and the corresponding object file
for line in dep_file_lines:
# Search line for header file name, ignore leading and trailing whitespace
hdr = re.search('\s*(.*\.h)', line)
# If the dep. file is a header and does not exist the dep. file is outdated
if hdr and not os.path.isfile(hdr.group(1)):
                # Print a message for the outdated dependency and remove the *.d file
                print(" " + cur_dep_file + " is out of date, it is being removed")
                os.remove(cur_dep_file)
# "Removing only the dependency file may be insufficient, and may in fact lead to
# an incorrect build -- we also need to remove the object file associated with the
# dependency file we removed...we can hopefully get the name of the object file by
# stripping off the .d from the dependency file's filename." -JP
x = re.search('(.*)\.d', cur_dep_file)
if x and os.path.isfile(x.group(1)):
# Print a message for the outdated object file and remove it
print " " + x.group(1) + " is out of date, it is being removed"
os.remove(x.group(1))
# Stop the looping over the lines of this *.d file
break
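# A *.d dependency file typically looks like this (illustrative content only):
#   src/kernels/MyKernel.o: src/kernels/MyKernel.C \
#     include/kernels/MyKernel.h
# The header search above would flag such a file if MyKernel.h no longer
# exists, removing both MyKernel.d and MyKernel.o.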
# ENTRY POINT
if len(sys.argv) == 2:
    # Print a message
    print("====== Cleaning outdated dependencies ======")
    # Locate the directories to clean
    dep_dirs = findDepDirs(sys.argv[1])
    # Clean each of the directories
    for dep_dir in dep_dirs:
        print(" Cleaning: " + dep_dir)
        cleanDepDirs(dep_dir)
else:
    # Report an error
    print("ERROR: You must supply the root MOOSE directory (e.g., ~/project/trunk)")
    sys.exit(1)
|
nuclear-wizard/moose
|
framework/scripts/rm_outdated_deps.py
|
Python
|
lgpl-2.1
| 5,159
|
[
"MOOSE"
] |
9b02126c9582bd66ea133b53841397caa0436e7c47270201012ab485456abb67
|
#!/usr/bin/env python
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import mock
import logging
import MooseDocs
import mooseutils
from moosesqa import SQAReport, SQADocumentReport, LogHelper
@unittest.skipIf(mooseutils.git_version() < (2,11,4), "Git version must be at least 2.11.4")
class TestSQADocumentReport(unittest.TestCase):
def setUp(self):
SQADocumentReport.FILE_CACHE = MooseDocs.PROJECT_FILES
@mock.patch('mooseutils.colorText', side_effect=lambda t, c, **kwargs: t)
def testReport(self, color_text):
# PASS
reporter = SQADocumentReport(required_documents=['rtm', 'google'], rtm='moose_rtm.md', google='https://www.google.com')
r = reporter.getReport()
self.assertEqual(reporter.status, SQAReport.Status.PASS)
self.assertIn('log_rtm: 0', r)
self.assertIn('log_google: 0', r)
# ERROR with missing doc
reporter = SQADocumentReport(required_documents=['rtm', 'google'], rtm='moose_rtm.md')
r = reporter.getReport()
self.assertEqual(reporter.status, SQAReport.Status.ERROR)
self.assertIn('log_rtm: 0', r)
self.assertIn('log_google: 1', r)
# WARNING with missing doc
reporter = SQADocumentReport(required_documents=['rtm', 'google'], rtm='moose_rtm.md', log_google='WARNING')
r = reporter.getReport()
self.assertEqual(reporter.status, SQAReport.Status.WARNING)
self.assertIn('log_rtm: 0', r)
self.assertIn('log_google: 1', r)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
harterj/moose
|
python/moosesqa/test/test_SQADocumentReport.py
|
Python
|
lgpl-2.1
| 1,836
|
[
"MOOSE"
] |
d15bb5d9f8f5998fc12ea80c7f6cdb72429811a005a2f9345f62b7bf7ce75a41
|
# Gaussian discriminant analysis in 2d
# Author: Duane Rich
# Based on matlab code by Kevin Murphy
#https://github.com/probml/pmtk3/blob/master/demos/discrimAnalysisDboundariesDemo.m
import superimport
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = '../figures'
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
c = 'bgr'
m = 'xos'
n_samples = 30  # number of samples per class
model_names = ('LDA', 'QDA')
np.random.seed(0)
def mvn2d(x, y, u, sigma):
xx, yy = np.meshgrid(x, y)
xy = np.c_[xx.ravel(), yy.ravel()]
sigma_inv = np.linalg.inv(sigma)
z = np.dot((xy - u), sigma_inv)
z = np.sum(z * (xy - u), axis=1)
z = np.exp(-0.5 * z)
z = z / (2 * np.pi * np.linalg.det(sigma) ** 0.5)
return z.reshape(xx.shape)
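# A small usage sketch (arbitrary grid and parameters): evaluating a standard
# bivariate normal on a 3x3 grid returns an array with the meshgrid's shape.
#
#   z = mvn2d(np.linspace(-1, 1, 3), np.linspace(-1, 1, 3),
#             np.zeros(2), np.eye(2))   # z.shape == (3, 3)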
# Each model specifies the means and covariances.
# If the covariances are equal across classes, decision boundaries
# will be linear even if we use QDA
def is_pos_def(x):
return np.all(np.linalg.eigvals(x) > 0)
model1 = ([[1.5, 1.5], [-1.5, -1.5]],
[np.eye(2)] * 2)
model2 = ([[1.5, 1.5], [-1.5, -1.5]],
[[[1.5, 0], [0, 1]], np.eye(2) * 0.7])
model3 = ([[0, 0], [0, 5], [5, 5]],
[np.eye(2)] * 3)
Sigma1 = np.array([[4, 1], [1, 2]])
Sigma2 = np.array([[2, 0], [0, 1]])
Sigma3 = np.eye(2)
model4 = ([[0, 0], [0, 5], [5, 5]],
[Sigma1, Sigma2, Sigma3])
models = [model1, model2, model3, model4]
models = [model4]  # only run the last model; restore the full list above to plot all four
for n_th, (u, sigma) in enumerate(models):
# generate random points
x = [] # store sample points
y = [] # store class labels
nclasses = len(u) # means
for i in range(nclasses):
x.append(np.random.multivariate_normal(u[i], sigma[i], n_samples))
y.append([i] * n_samples)
points = np.vstack(x)
labels = np.hstack(y)
x_min, y_min = np.min(points, axis=0)
x_max, y_max = np.max(points, axis=0)
N = 100
x_range = np.linspace(x_min - 1, x_max + 1, N)
y_range = np.linspace(y_min - 1, y_max + 1, N)
xx, yy = np.meshgrid(x_range, y_range)
for k, model in enumerate((LDA(), QDA())):
#fit, predict
clf = model
clf.fit(points, labels)
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(N, N)
z_p = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
        #draw areas and boundaries
plt.figure()
plt.pcolormesh(xx, yy, z)
plt.jet()
        for j in range(nclasses):
            plt.contour(xx, yy, z_p[:, j].reshape(N, N),
                        [0.5], linewidths=3, colors='k')
#draw points
for i, point in enumerate(x):
plt.plot(point[:, 0], point[:, 1], c[i] + m[i])
#draw contours
for i in range(nclasses):
prob = mvn2d(x_range, y_range, u[i], sigma[i])
cs = plt.contour(xx, yy, prob, colors=c[i])
        plt.title('Separate {0} classes using {1}'.
                  format(nclasses, model_names[k]))
save_fig('discrimAnalysisDboundariesDemo{}.pdf'.format(n_th * 2 + k))
plt.show()
|
probml/pyprobml
|
scripts/discrim_analysis_dboundaries_plot.py
|
Python
|
mit
| 3,224
|
[
"Gaussian"
] |
42eeb1ea530f72f3fcd4c9549649776cbc10b48fface320d0d644c454a10f873
|
#!/usr/bin/env python
""" Mission 7-Detect and Deliver
1. Random walk with gaussian at center of map until station position is acquired
2. loiter around until correct face seen
3. if symbol seen, move towards symbol perpendicularly
4. if close enough, do move_base aiming
task 7:
-----------------
Created by Reinaldo@ 2016-12-07
Authors: Reinaldo
-----------------
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
import random
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_waypoint import MoveTo
from move_base_loiter import Loiter
from move_base_stationkeeping import StationKeeping
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import Int8
class DetectDeliver(object):
map_dim = [[0, 40], [0, 40]]
MAX_DATA=10
x0, y0, yaw0= 0, 0, 0
symbol=[0 , 0]
symbols=np.zeros((MAX_DATA, 2)) #unordered list
symbols_counter=0
angle_threshold=10*math.pi/180
symbol_location=np.zeros((MAX_DATA, 2))
shape_counter=0
distance_to_box=3
def __init__(self, symbol_list):
print("starting task 7")
rospy.init_node('task_7', anonymous=True)
self.symbol=symbol_list
self.symbol_visited=0
self.symbol_seen=False
self.symbol_position=[0, 0, 0]
self.station_seen=False #station here is cluster center of any face
self.station_position=[0, 0]
self.loiter_obj = Loiter("loiter", is_newnode=False, target=None, radius=5, polygon=4, mode=2, mode_param=1, is_relative=False)
self.moveto_obj = MoveTo("moveto", is_newnode=False, target=None, is_relative=False)
self.stationkeep_obj = StationKeeping("station_keeping", is_newnode=False, target=None, radius=2, duration=30)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.symbol_callback, queue_size = 50)
rospy.Subscriber("/finished_search_and_shoot", Int8, self.stop_shoot_callback, queue_size = 5)
self.shooting_pub= rospy.Publisher('/start_search_and_shoot', Int8, queue_size=5)
self.marker_pub= rospy.Publisher('/waypoint_markers', Marker, queue_size=5)
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
self.odom_received = False
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
print(self.symbol)
while not rospy.is_shutdown() and not self.station_seen:
self.moveto_obj.respawn(self.random_walk(), )#forward
print("station: ")
print(self.station_position)
#loiter around station until symbol's face seen
loiter_radius=math.sqrt((self.x0-self.station_position[0])**2+(self.y0-self.station_position[1])**2)
if loiter_radius>10:
loiter_radius=10
while not rospy.is_shutdown():
print(loiter_radius)
self.loiter_obj.respawn(self.station_position, loiter_radius, )
if loiter_radius>4:
loiter_radius-=2
if self.symbol_seen:
print(self.symbol_position)
print("symbol's position acquired, exit loitering")
break
time.sleep(1)
print(self.symbol_position)
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
counter=0
print(d)
#moveto an offset, replan in the way
while not rospy.is_shutdown():
alpha=self.yaw0-self.symbol_position[2]
theta=math.atan2(math.fabs(math.sin(alpha)), math.fabs(math.cos(alpha))) #always +ve and 0-pi/2
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
perpendicular_d=0.6*d*math.cos(theta)
if counter ==0 or theta>self.angle_threshold or d>self.distance_to_box:
print("replan")
target=[self.symbol_position[0]+perpendicular_d*math.cos(self.symbol_position[2]),self.symbol_position[1]+perpendicular_d*math.sin(self.symbol_position[2]), -self.symbol_position[2]]
self.moveto_obj.respawn(target, )
counter+=1
if d<self.distance_to_box:
break
time.sleep(1)
#aiming to the box
self.shooting_complete=False
self.is_aiming=False
print("aiming to box")
print("start shooting module")
self.shooting_pub.publish(1)
station=[self.x0, self.y0, -self.symbol_position[2]]
radius=2
duration=30
print(self.symbol_position)
print(station)
while not rospy.is_shutdown():
self.shooting_pub.publish(1)
#duration 0 is forever
if not self.is_aiming:
self.stationkeep_obj.respawn(station, radius, duration)
#make aiming respawn
if self.shooting_complete:
print("shooting done, return to base")
break
time.sleep(1)
def stop_shoot_callback(self, msg):
if msg.data==1:
#stop aiming station
self.shooting_complete=True
def random_walk(self):
""" create random walk points and more favor towards center """
x = random.gauss(np.mean(self.map_dim[0]), 0.25 * np.ptp(self.map_dim[0]))
y = random.gauss(np.mean(self.map_dim[1]), 0.25 * np.ptp(self.map_dim[1]))
return self.map_constrain(x, y)
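    # Illustrative note (not in the original): with map_dim = [[0, 40], [0, 40]]
    # the targets above are drawn as random.gauss(20.0, 10.0) per axis (mean of
    # the range, sigma = 0.25 * its span), then clipped to the map below.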
    def map_constrain(self, x, y):
        """ constrain x and y within map """
        x = min(max(x, np.min(self.map_dim[0])), np.max(self.map_dim[0]))
        y = min(max(y, np.min(self.map_dim[1])), np.max(self.map_dim[1]))
        return [x, y, 0]
def symbol_callback(self, msg):
if len(msg.markers)>0:
if self.symbols_counter>self.MAX_DATA:
station_kmeans = KMeans(n_clusters=1).fit(self.symbols)
self.station_center=station_kmeans.cluster_centers_
self.station_position[0]=self.station_center[0][0]
self.station_position[1]=self.station_center[0][1]
self.station_seen=True
for i in range(len(msg.markers)):
self.symbols[self.symbols_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.symbols_counter+=1
if msg.markers[i].type==self.symbol[0] and msg.markers[i].id==self.symbol[1]:
#set position_list (not sure)
self.symbol_position[0]=msg.markers[i].pose.position.x
self.symbol_position[1]=msg.markers[i].pose.position.y
x = msg.markers[i].pose.orientation.x
y = msg.markers[i].pose.orientation.y
z = msg.markers[i].pose.orientation.z
w = msg.markers[i].pose.orientation.w
_, _, self.symbol_position[2] = euler_from_quaternion((x, y, z, w))
self.symbol_location[self.shape_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.shape_counter+=1
if self.station_seen and self.shape_counter>self.MAX_DATA:
symbol_kmeans = KMeans(n_clusters=1).fit(self.symbol_location)
self.symbol_center=symbol_kmeans.cluster_centers_
self.symbol_position[0]=self.symbol_center[0][0]
self.symbol_position[1]=self.symbol_center[0][1]
#print(self.symbol_position)
self.symbol_seen=True
#self.pool.apply(cancel_loiter)
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
#[id,type]cruciform red
DetectDeliver([1,0])
except rospy.ROSInterruptException:
rospy.loginfo("Task 7 Finished")
|
ron1818/Singaboat_RobotX2016
|
robotx_nav/nodes/task7_non_process_2.py
|
Python
|
gpl-3.0
| 8,074
|
[
"Gaussian"
] |
3fc6195ba5f34e5d47bb6dc08e4a33202648dfa4b662b823e8c9d492b5d1c2d9
|
#---------------------------------
# PIPELINE RUN
#---------------------------------
# The configuration settings to run the pipeline. These options are overwritten
# if a new setting is specified as an argument when running the pipeline.
# These settings include:
# - logDir: The directory where the batch queue scripts are stored, along with
# stdout and stderr dumps after the job is run.
# - logFile: Log file in logDir which all commands submitted are stored.
# - style: the style which the pipeline runs in. One of:
# - 'print': prints the stages which will be run to stdout,
# - 'run': runs the pipeline until the specified stages are finished, and
# - 'flowchart': outputs a flowchart of the pipeline stages specified and
# their dependencies.
# - procs: the number of python processes to run simultaneously. This
# determines the maximum parallelism of the pipeline. For distributed jobs
# it also constrains the maximum total jobs submitted to the queue at any one
# time.
# - verbose: one of 0 (quiet), 1 (normal), 2 (chatty).
# - end: the desired tasks to be run. Rubra will also run all tasks which are
# dependencies of these tasks.
# - force: tasks which will be forced to run, regardless of timestamps.
# - rebuild: one of 'fromstart','fromend'. Whether to calculate which
# dependencies will be rerun by working back from an end task to the latest
# up-to-date task, or forward from the earliest out-of-date task. 'fromstart'
# is the most conservative and commonly used as it brings all intermediate
# tasks up to date.
# - manager: "pbs" or "slurm"
pipeline = {
"logDir": "log",
"logFile": "pipeline_commands.log",
"style": "print",
"procs": 16,
"verbose": 2,
"end": ["fastQCSummary", "voom", "edgeR", "qcSummary"],
"force": [],
"rebuild": "fromstart",
"manager": "slurm",
}
# This option specifies whether or not you are using VLSCI's Merri or Barcoo
# cluster. If True, this changes java's tmpdir to the job's tmp dir on
# /scratch ($TMPDIR) instead of using the default /tmp which has limited space.
using_merri = True
# Optional parameter governing how Ruffus determines which part of the
# pipeline is out-of-date and needs to be re-run. If set to False, Ruffus
# will work back from the end target tasks and only execute the pipeline
# after the first up-to-date tasks that it encounters.
# Warning: Use with caution! If you don't understand what this option does,
# keep this option as True.
maximal_rebuild_mode = True
#---------------------------------
# CONFIG
#---------------------------------
# Name of analysis. Changing the name will create new sub-directories for
# voom, edgeR, and cuffdiff analysis.
analysis_name = "analysis_v1"
# The directory containing *.fastq.gz read files.
raw_seq_dir = "/path_to_project/fastq_files/"
# Path to the CSV file with sample information regarding condition and
# covariates if available.
samples_csv = "/path_to_project/fastq_files/samples.csv"
# Path to the CSV file with which comparisons to make.
comparisons_csv = "/path_to_project/fastq_files/comparisons.csv"
# The output directory.
output_dir = "/path_to_project/results/"
# Sequencing platform for read group information.
platform = "Illumina"
# If the experiment is paired-end or single-end: True (PE) or False (SE).
paired_end = False
# Whether the experiment is strand specific: "yes", "no", or "reverse".
stranded = "no"
#---------------------------------
# REFERENCE FILES
#---------------------------------
# Most reference files can be obtained from the Illumina iGenomes project:
# http://cufflinks.cbcb.umd.edu/igenomes.html
# Bowtie 2 index files: *.1.bt2, *.2.bt2, *.3.bt2, *.4.bt2, *.rev.1.bt2,
# *.rev.2.bt2.
genome_ref = "/vlsci/VR0002/shared/Reference_Files/Indexed_Ref_Genomes/bowtie_Indexed/human_g1k_v37"
# Genome reference FASTA. Also needs an indexed genome (.fai) and dictionary
# (.dict) file in the same directory.
genome_ref_fa = "/vlsci/VR0002/shared/Reference_Files/Indexed_Ref_Genomes/bowtie_Indexed/human_g1k_v37.fa"
# Gene set reference file (.gtf). Recommend using the GTF file obtained from
# Ensembl as Ensembl gene IDs are used for annotation (if specified).
gene_ref = "/vlsci/VR0002/shared/Reference_Files/Indexed_Ref_Genomes/TuxedoSuite_Ref_Files/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.gtf"
# Either a rRNA reference fasta (ending in .fasta or .fa) or an GATK interval
# file (ending in .list) containing rRNA intervals to calculate the rRNA
# content. Can set as False if not available.
# rrna_ref = "/vlsci/VR0002/shared/Reference_Files/rRNA/human_all_rRNA.fasta"
rrna_ref = "/vlsci/VR0002/shared/jchung/human_reference_files/human_rRNA.list"
# Optional tRNA and rRNA sequences to filter out in Cuffdiff (.gtf or .gff).
# Set as False if not provided.
cuffdiff_mask_file = False
#---------------------------------
# TRIMMOMATIC PARAMETERS
#---------------------------------
# Parameters for Trimmomatic (a tool for trimming Illumina reads).
# http://www.usadellab.org/cms/index.php?page=trimmomatic
# Path of a FASTA file containing adapter sequences used in sequencing.
adapter_seq = "/vlsci/VR0002/shared/jchung/human_reference_files/TruSeqAdapters.fa"
# The maximum mismatch count which will still allow a full match to be
# performed.
seed_mismatches = 2
# How accurate the match between the two 'adapter ligated' reads must be for
# PE palindrome read alignment.
palendrome_clip_threshold = 30
# How accurate the match between any adapter etc. sequence must be against a
# read.
simple_clip_threshold = 10
# The minimum quality needed to keep a base and the minimum length of reads to
# be kept.
extra_parameters = "LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36"
# Output Trimmomatic log file
write_trimmomatic_log = True
#---------------------------------
# R PARAMETERS
#---------------------------------
# Get annotations from Ensembl BioMart. GTF file needs to use IDs from Ensembl.
# Set as False to skip annotation, else
# provide the name of the dataset that will be queried. Attributes to be
# obtained include gene symbol, chromosome name, description, and gene biotype.
# Commonly used datasets:
# human: "hsapiens_gene_ensembl"
# mouse: "mmusculus_gene_ensembl"
# rat: "rnorvegicus_gene_ensembl"
# You can list all available datasets in R by using the listDatasets function:
# > library(biomaRt)
# > listDatasets(useMart("ensembl"))
# The gene symbol is obtained from the attribute "hgnc_symbol" (human) or
# "mgi_symbol" (mice/rats) if available. If not, the "external_gene_id" is used
# to obtain the gene symbol. You can change this by editing the script:
# scripts/combine_and_annotate.r
annotation_dataset = "hsapiens_gene_ensembl"
#---------------------------------
# SCRIPT PATHS
#---------------------------------
# Paths to other wrapper scripts needed to run the pipeline. Make sure these
# paths are relative to the directory where you plan to run the pipeline in or
# change them to absolute paths.
html_index_script = "scripts/html_index.py"
index_script = "scripts/build_index.sh"
tophat_script = "scripts/run_tophat.sh"
merge_tophat_script = "scripts/merge_tophat.sh"
fix_tophat_unmapped_reads_script = "scripts/fix_tophat_unmapped_reads.py"
htseq_script = "scripts/run_htseq.sh"
fastqc_parse_script = "scripts/fastqc_parse.py"
qc_parse_script = "scripts/qc_parse.py"
alignment_stats_script = "scripts/alignment_stats.sh"
combine_and_annotate_script = "scripts/combine_and_annotate.R"
de_analysis_script = "scripts/de_analysis.R"
#---------------------------------
# PROGRAM PATHS
#---------------------------------
trimmomatic_path = "/usr/local/trimmomatic/0.30/trimmomatic-0.30.jar"
reorder_sam_path = "/usr/local/picard/1.69/lib/ReorderSam.jar"
mark_duplicates_path = "/usr/local/picard/1.69/lib/MarkDuplicates.jar"
rnaseqc_path = "/usr/local/rnaseqc/1.1.7/RNA-SeQC_v1.1.7.jar"
add_or_replace_read_groups_path = "/usr/local/picard/1.69/lib/AddOrReplaceReadGroups.jar"
|
jessicachung/rna_seq_pipeline
|
pipeline_config.py
|
Python
|
mit
| 8,036
|
[
"Bowtie"
] |
b39e2c01330a27715af588bf29733cb8d632944cd3f76106f5925aff68022338
|
LOGO_STRING = """
.o88Oo._
d8P .ooOO8bo._
88 '*Y8bo.
YA '*Y8b __
YA 68o68**8Oo.
"8D *"' "Y8o
Y8 'YB .8D
'8 d8' 8D
8 d8888b d AY
Y, d888888 d' _.oP"
q. Y8888P' d8
"q. `Y88P' d8"
Y ,o8P
oooo888P"
"""
COALA_BEAR_LOGO = LOGO_STRING.split('\n')
WELCOME_MESSAGES = ['Hi there! Awesome you decided to do some high '
'quality coding. coala is just the tool you need!',
'You can configure coala to suit your needs. This '
'is done with a settings file called a `.coafile` '
'in the project directory.',
"We can help you with that. Let's get started with "
'some basic questions.']
GLOB_HELP_URL = 'http://coala.readthedocs.io/en/latest/Users/Glob_Patterns.html'
GLOB_HELP = """
File globs are a very concise way to specify a large
number of files. You may give multiple file globs
separated by commas. To learn more about glob patterns
please visit: {}
For example, you may want to include your src/ folder and
all its contents but exclude your .git directory and all
.o files. To do this, simply give `src/` for the first
question and `.git/**,**/*.o` for the second question.
""".format(GLOB_HELP_URL)
BEAR_DOCS_URL = ('https://github.com/coala/bear-docs/blob/master/'
'README.rst#supported-languages')
BEAR_HELP = """
A coala bear is a plugin that contains the checking routines. It may be
language specific or language independent. This makes coala completely
modularized and extensible. Many languages including C/C++, Python,
JavaScript are supported out-of-the-box.
You can see all of them here: {}
""".format(BEAR_DOCS_URL)
|
MalkmusT/coala-quickstart
|
coala_quickstart/Strings.py
|
Python
|
agpl-3.0
| 1,942
|
[
"VisIt"
] |
01bd7dab6b55a4c81287bb292ade3c64e73e6c5952f1bdce857a36ed5ae15cdb
|
import numpy as np
import pyle.envelopes as env
from pyle.dataking import utilMultilevels as ml
import util
# sequence helpers
# these are simple functions for building standard control envelopes by
# pulling out appropriate configuration parameters for a particular qubit.
def power2amp(power):
"""
    Convert readout rms uwave power into DAC amplitude. This function assumes a nonzero sb_freq.
This function contains several hardcoded values and should be reworked.
"""
assert power.isCompatible('dBm'), 'Power must be put in dBm.'
power = power['dBm']
#v = 10**(((power-10)/10.0+2)/2) #old shit
Z=50 #impedance of system
    mixingAndCablingLossInDb=30 #loss on the DAC board from mixing, coupling, filters, etc.; typically 30 dB: 10 from the IQ mixer, 10 from the directional coupler, and 10 from attenuators
rmsv = np.sqrt(Z * 10 **( (power+mixingAndCablingLossInDb) /10) * 1e-3 ) #power is rms power in dBm, i.e. 0 dBm = 1 mW
v = rmsv*np.sqrt(2) #amplitude of voltage, non-rms
dacamp = v/0.4 # dac_amp = 1 corresponds to 400mV
    if dacamp > 1.0:
        print 'dacamp too big: ', dacamp
    elif dacamp < 0.03125:
        print 'dacamp too small (using less than 8 bits): ', dacamp
#else:
#print 'dacamp: ', dacamp
return dacamp
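# Worked example (illustrative, not from the original): for power = -30 dBm,
# the assumed 30 dB of mixing/cabling loss gives 0 dBm = 1 mW into 50 Ohm,
# so rmsv = sqrt(0.05) ~= 0.224 V, v ~= 0.316 V peak, and dacamp ~= 0.79.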
def mix(q, seq, freq=None, state=None):
"""Apply microwave mixing to a sequence.
This mixes to a particular frequency from the carrier frequency.
Also, adjusts the microwave phase according to the phase calibration.
PARAMETERS
q: Qubit dictionary.
seq - eh functions: Pulses to mix with microwaves.
freq - string: Registry key indicating desired frequency of post-mix
pulse (e.g., 'f10','f21').
state - scalar: Which qubit frequency is desired for post-mix pulse
(e.g., 1 gives f10, 2 gives f21).
"""
if freq is not None and state is not None:
raise Exception('state and freq are not orthogonal parameters for mixing')
if isinstance(freq, str):
freq = q[freq]
if freq is None:
if state is None:
state=1
freq = ml.getMultiLevels(q,'frequency',state)
return env.mix(seq, freq - q['fc']) * np.exp(1j*q['uwavePhase'])
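# Example usage (illustrative): mix(q, piPulse(q, 0), state=2) shifts the pi
# pulse envelope by f21 - fc and applies the calibrated microwave phase, per
# the state convention documented above (1 -> f10, 2 -> f21).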
# xy rotations with half-derivative term on other quadrature
def piPulseHD(q, t0, phase=0, alpha=0.5, state=1, length='piFWHM'):
    """Pi pulse using a gaussian envelope with half-derivative Y quadrature."""
    return rotPulseHD(q, t0, angle=np.pi, phase=phase, alpha=alpha, state=state, length=length)
def piHalfPulseHD(q, t0, phase=0, alpha=0.5, state=1, length='piFWHM'):
    """Pi/2 pulse using a gaussian envelope with half-derivative Y quadrature."""
    return rotPulseHD(q, t0, angle=np.pi/2, phase=phase, alpha=alpha, state=state, length=length)
def rotPulseHD(q, t0, angle=np.pi, phase=0, alpha=0.5, state=1, length='piFWHM'):
"""Rotation pulse using a gaussian envelope with half-derivative Y quadrature.
This also allows for an arbitrary pulse length. The new length must be defined as a key in the registry.
"""
# Eliminate DRAG for higher order pulses
if state>1: alpha = 0
#Get the pi amplitude. getMultiLevels() ensures that the correct key is read regardless of which state is desired.
#Note in particular that old code, which does not explicitly set state, and therefore gets the default value of 1,
#will get 'piAmp', as desired.
piamp = ml.getMultiLevels(q,'piAmp',state)
r = angle / np.pi
delta = 2*np.pi * (q['f21'] - q['f10'])['GHz']
x = env.gaussian(t0, w=q[length], amp=piamp*r, phase=phase)
y = -alpha * env.deriv(x) / delta
return x + 1j*y
def rabiPulseHD(q, t0, len, w=None, amp=None, overshoot=0.0, overshoot_w=1.0, alpha=0.5, state=1):
"""Rabi pulse using a flattop envelope with half-derivative Y quadrature."""
# Eliminate DRAG for higher order pulses
if state>1: alpha = 0
#Get the pi amplitude. getMultiLevels() ensures that the correct key is read regardless of which state is desired.
#Note in particular that old code, which does not explicitly set state, and therefore gets the default value of 1,
#will get 'piAmp', as desired.
if amp is None:
amp = ml.getMultiLevels(q,'piAmp',state)
if w is None:
w=q['piFWHM']
delta = 2*np.pi * (q['f21'] - q['f10'])['GHz']
x = env.flattop(t0, len, w, amp, overshoot, overshoot_w)
y = -alpha * env.deriv(x) / delta
return x + 1j*y
# z rotations
def piPulseZ(q, t0):
"""Pi pulse using a gaussian envelope."""
return rotPulseZ(q, t0, angle=np.pi)
def piHalfPulseZ(q, t0):
"""Pi/2 pulse using a gaussian envelope."""
return rotPulseZ(q, t0, angle=np.pi/2)
def rotPulseZ(q, t0, angle=np.pi):
"""Rotation pulse using a gaussian envelope."""
r = angle / np.pi
return env.gaussian(t0, w=q['piFWHMZ'], amp=q['piAmpZ']*r)
# default pulse type is half-derivative
piPulse = piPulseHD
piHalfPulse = piHalfPulseHD
rotPulse = rotPulseHD
def spectroscopyPulse(q, t0, df=0):
dt = q['spectroscopyLen']
amp = q['spectroscopyAmp']
return env.mix(env.flattop(t0, dt, w=q['piFWHM'], amp=amp), df)
def measurePulse(q, t0, state=1):
"""Add a measure pulse for the desired state.
PARAMETERS
q: Qubit dictionary.
t0 - value [us]: Time to start the measure pulses.
state - scalar: Which state's measure pulse to use.
"""
return env.trapezoid(t0, 0, q['measureLenTop'], q['measureLenFall'], ml.getMultiLevels(q,'measureAmp',state))
def measurePulse2(q, t0):
return env.trapezoid(t0, 0, q['measureLenTop2'], q['measureLenFall2'], q['measureAmp2'])
def readoutPulse(q, t0):
dt = q['readoutLen']
amp = power2amp(q['readout power'])
df = q['readout frequency'] - q['readout fc']
return env.mix(env.flattop(t0, dt, w=q['readoutWidth'], amp=amp), df)
def boostState(q, t0, state):
"""Excite the qubit to the desired state, concatenating pi pulses as needed.
PARAMETERS
q: Qubit dictionary.
t0 - value [ns]: Time to start the pulses (center of first pi pulse).
state - scalar: State to which qubit should be excited.
"""
xypulse = env.NOTHING
for midstate in range(state):
xypulse = xypulse + mix(q, piPulse(q, t0+midstate*q['piLen'], state=(midstate+1)), state=(midstate+1))
return xypulse
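# For example (illustrative), boostState(q, 0, state=2) concatenates a 0->1 pi
# pulse centered at t0 = 0 with a 1->2 pi pulse centered at t0 + q['piLen'].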
# sequence corrections
def correctCrosstalkZ(qubits):
"""Adjust the z-pulse sequences on all qubits for z-xtalk."""
biases = [q.z for q in qubits]
for q in qubits:
coefs = list(q['calZpaXtalkInv'])
q.z = sum(float(c) * bias for c, bias in zip(coefs, biases))
|
McDermott-Group/LabRAD
|
LabRAD/TestScripts/fpgaTest/pyle/pyle/dataking/envelopehelpers.py
|
Python
|
gpl-2.0
| 6,645
|
[
"Gaussian"
] |
df7b56bc0a4841d419a9ece25662bca71e034b998b5e4fe420967a6df6724649
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions.
e.g. `a and b -> tf.logical_and(a, b)`. This is not done automatically in TF.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that for loops, this should be done after the loop is converted to
# tf.while_loop so that the expanded conditionals are properly scoped.
# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'
class LogicalExpressionTransformer(converter.Base):
"""Converts logical expressions to corresponding TF calls."""
def __init__(self, ctx):
super(LogicalExpressionTransformer, self).__init__(ctx)
# TODO(mdan): For completeness and consistency, overload everything.
self.op_mapping = {
gast.And: 'ag__.and_',
gast.Eq: 'ag__.eq',
gast.NotEq: 'ag__.not_eq',
gast.Lt: 'ag__.lt',
gast.LtE: 'ag__.lt_e',
gast.Gt: 'ag__.gt',
gast.GtE: 'ag__.gt_e',
gast.Is: 'ag__.is_',
gast.IsNot: 'ag__.is_not',
gast.In: 'ag__.in_',
gast.Not: 'ag__.not_',
gast.NotIn: 'ag__.not_in',
gast.Or: 'ag__.or_',
gast.USub: 'ag__.u_sub',
}
def _expect_simple_symbol(self, operand):
if isinstance(operand, gast.Name):
return
if anno.hasanno(operand, SAFE_BOOLEAN_OPERAND):
return
raise NotImplementedError(
'only simple local variables are supported in logical and compound '
'comparison expressions; for example, we support "a or b" but not '
'"a.x or b"; for a workaround, assign the expression to a local '
'variable and use that instead, for example "tmp = a.x", "tmp or b"')
def _has_matching_func(self, operator):
op_type = type(operator)
return op_type in self.op_mapping
def _matching_func(self, operator):
op_type = type(operator)
return self.op_mapping[op_type]
def _as_function(self, func_name, args, args_as_lambda=False):
if args_as_lambda:
args_as_lambda = []
for arg in args:
template = """
lambda: arg
"""
args_as_lambda.append(
templates.replace_as_expression(template, arg=arg))
args = args_as_lambda
if not args:
template = """
func_name()
"""
replacement = templates.replace_as_expression(
template, func_name=parser.parse_expression(func_name))
elif len(args) == 1:
template = """
func_name(arg)
"""
replacement = templates.replace_as_expression(
template, func_name=parser.parse_expression(func_name), arg=args[0])
elif len(args) == 2:
template = """
func_name(arg1, arg2)
"""
replacement = templates.replace_as_expression(
template,
func_name=parser.parse_expression(func_name),
arg1=args[0],
arg2=args[1])
else:
raise NotImplementedError('{} arguments for {}'.format(
len(args), func_name))
anno.setanno(replacement, SAFE_BOOLEAN_OPERAND, True)
return replacement
def visit_Compare(self, node):
node = self.generic_visit(node)
ops_and_comps = list(zip(node.ops, node.comparators))
left = node.left
op_tree = None
# Repeated comparisons are converted to conjunctions:
# a < b < c -> a < b and b < c
while ops_and_comps:
op, right = ops_and_comps.pop(0)
binary_comparison = self._as_function(
self._matching_func(op), (left, right))
if isinstance(left, gast.Name) and isinstance(right, gast.Name):
anno.setanno(binary_comparison, SAFE_BOOLEAN_OPERAND, True)
if op_tree:
self._expect_simple_symbol(right)
op_tree = self._as_function(
'ag__.and_', (op_tree, binary_comparison), args_as_lambda=True)
else:
op_tree = binary_comparison
left = right
assert op_tree is not None
return op_tree
def visit_UnaryOp(self, node):
node = self.generic_visit(node)
return self._as_function(self._matching_func(node.op), (node.operand,))
def visit_BoolOp(self, node):
node = self.generic_visit(node)
node_values = node.values
right = node.values.pop()
self._expect_simple_symbol(right)
while node_values:
left = node_values.pop()
self._expect_simple_symbol(left)
right = self._as_function(
self._matching_func(node.op), (left, right), args_as_lambda=True)
return right
def transform(node, ctx):
return LogicalExpressionTransformer(ctx).visit(node)
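# Illustrative examples of the expected rewrites (derived from the op mapping
# and lambda-wrapping logic above, not taken from the original file):
#   a < b < c    ->  ag__.and_(lambda: ag__.lt(a, b), lambda: ag__.lt(b, c))
#   not (a or b) ->  ag__.not_(ag__.or_(lambda: a, lambda: b))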
|
dongjoon-hyun/tensorflow
|
tensorflow/python/autograph/converters/logical_expressions.py
|
Python
|
apache-2.0
| 5,667
|
[
"VisIt"
] |
d22b38fbdd928d6001f0c7d32551840c40f85a3f2d4c1a29d6734a91ec8c4700
|
#!/usr/bin/env python
import glob, os, os.path, shutil, socket, struct, tarfile, stat
import numpy, sys, presto, time, sigproc, sifting
import psr_utils as pu
import pyfits
institution = "NRAO"
base_tmp_dir = "." # "/dev/shm/" is a good choice
# This is where the output will be archived.
base_output_dir = "."
#-------------------------------------------------------------------
# Tunable parameters for searching and folding
# (you probably don't need to tune any of them)
raw_N = 1440000 # Number of samples to analyze (~118 secs)
rfifind_chunk_time = 25600 * 0.00008192 # ~2.1 sec
singlepulse_threshold = 5.0 # threshold SNR for candidate determination
singlepulse_plot_SNR = 5.5 # threshold SNR for singlepulse plot
singlepulse_maxwidth = 0.1 # max pulse width in seconds
to_prepfold_sigma = 6.0 # incoherent sum significance to fold candidates
max_lo_cands_to_fold = 20 # Never fold more than this many lo-accel candidates
max_hi_cands_to_fold = 10 # Never fold more than this many hi-accel candidates
numhits_to_fold = 2 # Number of DMs with a detection needed to fold
low_DM_cutoff = 1.0 # Lowest DM to consider as a "real" pulsar
lo_accel_numharm = 16 # max harmonics
lo_accel_sigma = 2.0 # threshold gaussian significance
lo_accel_zmax = 0 # bins
lo_accel_flo = 2.0 # Hz
hi_accel_numharm = 8 # max harmonics
hi_accel_sigma = 3.0 # threshold gaussian significance
hi_accel_zmax = 50 # bins
hi_accel_flo = 1.0 # Hz
low_T_to_search = 50.0 # sec
# Sifting specific parameters (don't touch without good reason!)
sifting.sigma_threshold = to_prepfold_sigma-1.0 # incoherent power threshold (sigma)
sifting.c_pow_threshold = 100.0 # coherent power threshold
sifting.r_err = 1.1 # Fourier bin tolerance for candidate equivalence
sifting.short_period = 0.0005 # Shortest period candidates to consider (s)
sifting.long_period = 15.0 # Longest period candidates to consider (s)
sifting.harm_pow_cutoff = 8.0 # Power required in at least one harmonic
foldnsubs = 128 # Number of subbands to use when folding
#-------------------------------------------------------------------
def get_baryv(ra, dec, mjd, T, obs="GB"):
"""
get_baryv(ra, dec, mjd, T):
Determine the average barycentric velocity towards 'ra', 'dec'
during an observation from 'obs'. The RA and DEC are in the
standard string format (i.e. 'hh:mm:ss.ssss' and 'dd:mm:ss.ssss').
'T' is in sec and 'mjd' is (of course) in MJD.
"""
tts = pu.span(mjd, mjd+T/86400.0, 100)
nn = len(tts)
bts = numpy.zeros(nn, dtype=numpy.float64)
vel = numpy.zeros(nn, dtype=numpy.float64)
presto.barycenter(tts, bts, vel, nn, ra, dec, obs, "DE200")
return vel.mean()
def find_masked_fraction(obs):
"""
find_masked_fraction(obs):
Parse the output file from an rfifind run and return the
fraction of the data that was suggested to be masked.
"""
rfifind_out = obs.basefilenm + "_rfifind.out"
for line in open(rfifind_out):
if "Number of bad intervals" in line:
return float(line.split("(")[1].split("%")[0])/100.0
# If there is a problem reading the file, return 100%
return 100.0
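# For example (illustrative; the exact rfifind output format is assumed), a
# line like "Number of bad intervals: 12 (3.45%)" would make this return 0.0345.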
def timed_execute(cmd, run_cmd=1):
"""
timed_execute(cmd):
Execute the command 'cmd' after logging the command
to STDOUT. Return the wall-clock amount of time
the command took to execute.
"""
sys.stdout.write("\n'"+cmd+"'\n")
sys.stdout.flush()
start = time.time()
if run_cmd: os.system(cmd)
end = time.time()
return end - start
def get_folding_command(cand, obs, ddplans, maskfile):
"""
get_folding_command(cand, obs, ddplans, maskfile):
Return a command for prepfold for folding the subbands using
an obs_info instance, a list of the ddplans, and a candidate
instance that describes the observations and searches.
"""
# Folding rules are based on the facts that we want:
# 1. Between 24 and 200 bins in the profiles
# 2. For most candidates, we want to search length = 101 p/pd/DM cubes
# (The side of the cube is always 2*M*N+1 where M is the "factor",
# either -npfact (for p and pd) or -ndmfact, and N is the number of bins
# in the profile). A search of 101^3 points is pretty fast.
# 3. For slow pulsars (where N=100 or 200), since we'll have to search
# many points, we'll use fewer intervals in time (-npart 30)
# 4. For the slowest pulsars, in order to avoid RFI, we'll
# not search in period-derivative.
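    # Worked example (illustrative, not from the original): a candidate with
    # p = 10 ms falls in the p < 0.05 branch below, giving N = 50 profile bins
    # with Mp = 2 and Mdm = 1, i.e. (2*2*50+1) = 201 search points in p and pd
    # and (2*1*50+1) = 101 points in DM.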
zmax = cand.filename.split("_")[-1]
outfilenm = obs.basefilenm+"_DM%s_Z%s"%(cand.DMstr, zmax)
hidms = [x.lodm for x in ddplans[1:]] + [2000]
dfacts = [x.downsamp for x in ddplans]
for hidm, dfact in zip(hidms, dfacts):
if cand.DM < hidm:
downsamp = dfact
break
if downsamp==1:
fitsfile = obs.fits_filenm
else:
fitsfile = obs.dsbasefilenm+"_DS%d%s"%(downsamp,obs.fits_filenm[obs.fits_filenm.rfind("_"):])
p = 1.0 / cand.f
if (p < 0.002):
Mp, Mdm, N = 2, 2, 24
otheropts = "-npart 50 -ndmfact 3"
elif p < 0.05:
Mp, Mdm, N = 2, 1, 50
otheropts = "-npart 40 -pstep 1 -pdstep 2 -dmstep 3"
elif p < 0.5:
Mp, Mdm, N = 1, 1, 100
otheropts = "-npart 30 -pstep 1 -pdstep 2 -dmstep 1"
else:
Mp, Mdm, N = 1, 1, 200
otheropts = "-npart 30 -nopdsearch -pstep 1 -pdstep 2 -dmstep 1"
return "prepfold -mask %s -noxwin -accelcand %d -accelfile %s.cand -dm %.2f -o %s %s -n %d -npfact %d -ndmfact %d -nsub %d %s" % \
(maskfile, cand.candnum, cand.filename, cand.DM, outfilenm,
otheropts, N, Mp, Mdm, foldnsubs, fitsfile)
class obs_info:
"""
class obs_info(fits_filenm)
A class describing the observation and the analysis.
"""
def __init__(self, fits_filenm):
self.fits_filenm = fits_filenm
self.basefilenm = fits_filenm[:fits_filenm.find(".fits")]
self.dsbasefilenm = fits_filenm[:fits_filenm.rfind("_")]
fitshandle=pyfits.open(fits_filenm)
self.MJD = fitshandle[0].header['STT_IMJD']+fitshandle[0].header['STT_SMJD']/86400.0+fitshandle[0].header['STT_OFFS']/86400.0
self.nchans = fitshandle[0].header['OBSNCHAN']
self.ra_string = fitshandle[0].header['RA']
self.dec_string = fitshandle[0].header['DEC']
self.str_coords = "J"+"".join(self.ra_string.split(":")[:2])
self.str_coords += "".join(self.dec_string.split(":")[:2])
self.nbits=fitshandle[0].header['BITPIX']
self.raw_N=fitshandle[1].header['NAXIS2']*fitshandle[1].header['NSBLK']
self.dt=fitshandle[1].header['TBIN']*1000000
self.raw_T = self.raw_N * self.dt
self.N = raw_N
if self.dt == 163.84:
self.N=self.N/2
self.T = self.N * self.dt
self.srcname=fitshandle[0].header['SRC_NAME']
# Determine the average barycentric velocity of the observation
self.baryv = get_baryv(self.ra_string, self.dec_string,
self.MJD, self.T, obs="GB")
# Where to dump all the results
# Directory structure is under the base_output_directory
# according to base/MJD/filenmbase/beam
self.outputdir = os.path.join(base_output_dir,
str(int(self.MJD)),
self.srcname)
# Figure out which host we are processing on
self.hostname = socket.gethostname()
# The fraction of the data recommended to be masked by rfifind
self.masked_fraction = 0.0
# Initialize our timers
self.rfifind_time = 0.0
self.downsample_time = 0.0
self.dedispersing_time = 0.0
self.FFT_time = 0.0
self.lo_accelsearch_time = 0.0
self.hi_accelsearch_time = 0.0
self.singlepulse_time = 0.0
self.sifting_time = 0.0
self.folding_time = 0.0
self.total_time = 0.0
        # Initialize some candidate counters
self.num_sifted_cands = 0
self.num_folded_cands = 0
self.num_single_cands = 0
def write_report(self, filenm):
report_file = open(filenm, "w")
report_file.write("---------------------------------------------------------\n")
report_file.write("%s was processed on %s\n"%(self.fits_filenm, self.hostname))
report_file.write("Ending UTC time: %s\n"%(time.asctime(time.gmtime())))
report_file.write("Total wall time: %.1f s (%.2f hrs)\n"%\
(self.total_time, self.total_time/3600.0))
report_file.write("Fraction of data masked: %.2f%%\n"%\
(self.masked_fraction*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.write(" rfifind time = %7.1f sec (%5.2f%%)\n"%\
(self.rfifind_time, self.rfifind_time/self.total_time*100.0))
report_file.write(" dedispersing time = %7.1f sec (%5.2f%%)\n"%\
(self.dedispersing_time, self.dedispersing_time/self.total_time*100.0))
report_file.write(" single-pulse time = %7.1f sec (%5.2f%%)\n"%\
(self.singlepulse_time, self.singlepulse_time/self.total_time*100.0))
report_file.write(" FFT time = %7.1f sec (%5.2f%%)\n"%\
(self.FFT_time, self.FFT_time/self.total_time*100.0))
report_file.write(" lo-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.lo_accelsearch_time, self.lo_accelsearch_time/self.total_time*100.0))
report_file.write(" hi-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
(self.hi_accelsearch_time, self.hi_accelsearch_time/self.total_time*100.0))
report_file.write(" sifting time = %7.1f sec (%5.2f%%)\n"%\
(self.sifting_time, self.sifting_time/self.total_time*100.0))
report_file.write(" folding time = %7.1f sec (%5.2f%%)\n"%\
(self.folding_time, self.folding_time/self.total_time*100.0))
report_file.write("---------------------------------------------------------\n")
report_file.close()
class dedisp_plan:
"""
class dedisp_plan(lodm, dmstep, dmsperpass, numpasses, numsub, downsamp)
A class describing a de-dispersion plan for prepsubband in detail.
"""
def __init__(self, lodm, dmstep, dmsperpass, numpasses, numsub, downsamp):
self.lodm = float(lodm)
self.dmstep = float(dmstep)
self.dmsperpass = int(dmsperpass)
self.numpasses = int(numpasses)
self.numsub = int(numsub)
self.downsamp = int(downsamp)
self.sub_dmstep = self.dmsperpass * self.dmstep
self.dmlist = [] # These are strings for comparison with filenames
self.subdmlist = []
for ii in range(self.numpasses):
self.subdmlist.append("%.2f"%(self.lodm + (ii+0.5)*self.sub_dmstep))
lodm = self.lodm + ii * self.sub_dmstep
dmlist = ["%.2f"%dm for dm in \
numpy.arange(self.dmsperpass)*self.dmstep + lodm]
self.dmlist.append(dmlist)
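# Worked example (illustrative): dedisp_plan(0.0, 0.02, 86, 81, 128, 1) covers
# 81 passes x 86 DMs at a 0.02 pc/cm^3 step, i.e. DMs 0.00 through 139.30,
# which is why the next '4096slow' plan defined below starts at lodm = 139.32.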
def remove_crosslist_duplicate_candidates(candlist1,candlist2):
n1 = len(candlist1)
n2 = len(candlist2)
removelist1 = []
removelist2 = []
candlist2.sort(sifting.cmp_freq)
candlist1.sort(sifting.cmp_freq)
print " Searching for crosslist dupes..."
ii = 0
while ii < n1:
jj=0
while jj < n2:
if numpy.fabs(candlist1[ii].r-candlist2[jj].r) < sifting.r_err:
if sifting.cmp_sigma(candlist1[ii],candlist2[jj])<0:
print "Crosslist remove from candlist 2, %f > %f, %d:%f~%f" % (candlist1[ii].sigma,candlist2[jj].sigma,jj,candlist1[ii].r,candlist2[jj].r)
if jj not in removelist2:
removelist2.append(jj)
else:
print "Crosslist remove from candlist 1, %f > %f, %d:%f~%f" % (candlist2[jj].sigma,candlist1[ii].sigma,ii,candlist1[ii].r,candlist2[jj].r)
if ii not in removelist1:
removelist1.append(ii)
jj += 1
ii += 1
for ii in range(len(removelist2)-1,-1,-1):
print "Removing %d from candlist2" % removelist2[ii]
del(candlist2[removelist2[ii]])
for ii in range(len(removelist1)-1,-1,-1):
print "Removing %d from candlist1" % removelist1[ii]
del(candlist1[removelist1[ii]])
print "Removed %d crosslist candidates\n" % (len(removelist1)+len(removelist2))
print "Found %d candidates. Sorting them by significance...\n" % (len(candlist1)+len(candlist2))
candlist1.sort(sifting.cmp_sigma)
candlist2.sort(sifting.cmp_sigma)
return candlist1,candlist2
def main(fits_filenm, workdir, ddplans):
# Change to the specified working directory
os.chdir(workdir)
# Get information on the observation and the job
job = obs_info(fits_filenm)
if job.raw_T < low_T_to_search:
print "The observation is too short (%.2f s) to search."%job.raw_T
sys.exit()
job.total_time = time.time()
if job.dt == 163.84:
ddplans = ddplans[str(job.nchans)+"slow"]
else:
ddplans = ddplans[str(job.nchans)+"fast"]
# Use whatever .zaplist is found in the current directory
default_zaplist = glob.glob("*.zaplist")[0]
# Make sure the output directory (and parent directories) exist
try:
os.makedirs(job.outputdir)
        os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
except: pass
# Make sure the tmp directory (in a tmpfs mount) exists
tmpdir = os.path.join(base_tmp_dir, job.basefilenm)
try:
os.makedirs(tmpdir)
except: pass
print "\nBeginning GBNCC search of '%s'"%job.fits_filenm
print "UTC time is: %s"%(time.asctime(time.gmtime()))
rfifindout=job.basefilenm+"_rfifind.out"
rfifindmask=job.basefilenm+"_rfifind.mask"
if not os.path.exists(rfifindout) or not os.path.exists(rfifindmask):
# rfifind the filterbank file
cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
(rfifind_chunk_time, job.basefilenm,
job.fits_filenm, job.basefilenm)
job.rfifind_time += timed_execute(cmd)
maskfilenm = job.basefilenm + "_rfifind.mask"
# Find the fraction that was suggested to be masked
# Note: Should we stop processing if the fraction is
# above some large value? Maybe 30%?
job.masked_fraction = find_masked_fraction(job)
# Iterate over the stages of the overall de-dispersion plan
dmstrs = []
for ddplan in ddplans:
# Make a downsampled filterbank file
if ddplan.downsamp > 1:
cmd = "psrfits_subband -dstime %d -nsub %d -o %s_DS%d %s"%\
(ddplan.downsamp, job.nchans, job.dsbasefilenm, ddplan.downsamp, job.dsbasefilenm )
job.downsample_time += timed_execute(cmd)
fits_filenm = job.dsbasefilenm + "_DS%d%s"%\
(ddplan.downsamp,job.fits_filenm[job.fits_filenm.rfind("_"):])
else:
fits_filenm = job.fits_filenm
# Iterate over the individual passes through the .fil file
for passnum in range(ddplan.numpasses):
subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])
# Now de-disperse
cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
(maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep,
ddplan.dmstep, ddplan.numsub,
ddplan.dmsperpass, job.N/ddplan.downsamp,
tmpdir, job.basefilenm, fits_filenm)
job.dedispersing_time += timed_execute(cmd)
# Do the single-pulse search
cmd = "single_pulse_search.py -p -m %f -t %f %s/*.dat"%\
(singlepulse_maxwidth, singlepulse_threshold, tmpdir)
job.singlepulse_time += timed_execute(cmd)
spfiles = glob.glob("%s/*.singlepulse"%tmpdir)
for spfile in spfiles:
try:
shutil.move(spfile, workdir)
except: pass
# Iterate over all the new DMs
for dmstr in ddplan.dmlist[passnum]:
dmstrs.append(dmstr)
basenm = os.path.join(tmpdir, job.basefilenm+"_DM"+dmstr)
datnm = basenm+".dat"
fftnm = basenm+".fft"
infnm = basenm+".inf"
# FFT, zap, and de-redden
cmd = "realfft %s"%datnm
job.FFT_time += timed_execute(cmd)
cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
(default_zaplist, job.baryv, fftnm)
job.FFT_time += timed_execute(cmd)
cmd = "rednoise %s"%fftnm
job.FFT_time += timed_execute(cmd)
try:
os.rename(basenm+"_red.fft", fftnm)
except: pass
# Do the low-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
job.lo_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%lo_accel_zmax, workdir)
except: pass
# Do the high-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
job.hi_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%hi_accel_zmax, workdir)
except: pass
# Move the .inf files
try:
shutil.move(infnm, workdir)
except: pass
# Remove the .dat and .fft files
try:
os.remove(datnm)
except: pass
try:
os.remove(fftnm)
except: pass
# Make the single-pulse plots
basedmb = job.basefilenm+"_DM"
basedme = ".singlepulse "
# The following will make plots for DM ranges:
# 0-30, 20-110, 100-310, 300-1000+
dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
basedmb+"[012][0-9].[0-9][0-9]"+basedme,
basedmb+"[2-9][0-9].[0-9][0-9]"+basedme +
basedmb+"10[0-9].[0-9][0-9]"+basedme,
basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"30[0-9].[0-9][0-9]"+basedme,
basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
dmrangestrs = ["0-30", "20-110", "100-310", "300-1000+"]
psname = job.basefilenm+"_singlepulse.ps"
for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
cmd = 'single_pulse_search.py -t %f -g "%s"' % \
(singlepulse_plot_SNR, dmglob)
job.singlepulse_time += timed_execute(cmd)
try:
os.rename(psname,
job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
except: pass
# Sift through the candidates to choose the best to fold
job.sifting_time = time.time()
lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
if len(lo_accel_cands) and len(hi_accel_cands):
lo_accel_cands, hi_accel_cands = remove_crosslist_duplicate_candidates(lo_accel_cands, hi_accel_cands)
if len(lo_accel_cands):
lo_accel_cands.sort(sifting.cmp_sigma)
sifting.write_candlist(lo_accel_cands,
job.basefilenm+".accelcands_Z%d"%lo_accel_zmax)
if len(hi_accel_cands):
hi_accel_cands.sort(sifting.cmp_sigma)
sifting.write_candlist(hi_accel_cands,
job.basefilenm+".accelcands_Z%d"%hi_accel_zmax)
try:
cmd = "mv *.accelcands* "+job.outputdir
os.system(cmd)
except: pass
job.sifting_time = time.time() - job.sifting_time
# Fold the best candidates
cands_folded = 0
for cand in lo_accel_cands:
if cands_folded == max_lo_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
cands_folded += 1
cands_folded = 0
for cand in hi_accel_cands:
if cands_folded == max_hi_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans, maskfilenm))
cands_folded += 1
# Remove the bestprof files
bpfiles = glob.glob("*.pfd.bestprof")
for bpfile in bpfiles:
os.remove(bpfile)
# Now step through the .ps files and convert them to .png and gzip them
psfiles = glob.glob("*.ps")
for psfile in psfiles:
if "singlepulse" in psfile:
os.system("pstoimg -density 200 -antialias -crop a "+psfile)
else:
os.system("pstoimg -density 200 -antialias -flip cw "+psfile)
os.system("gzip "+psfile)
# Tar up the results files
tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
"_ACCEL_%d.tgz"%hi_accel_zmax,
"_ACCEL_%d.cand.tgz"%lo_accel_zmax,
"_ACCEL_%d.cand.tgz"%hi_accel_zmax,
"_singlepulse.tgz",
"_inf.tgz",
"_pfd.tgz"]
tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
"*_ACCEL_%d"%hi_accel_zmax,
"*_ACCEL_%d.cand"%lo_accel_zmax,
"*_ACCEL_%d.cand"%hi_accel_zmax,
"*.singlepulse",
"*_DM[0-9]*.inf",
"*.pfd"]
for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
for infile in glob.glob(tar_glob):
tf.add(infile)
os.remove(infile)
tf.close()
# Remove all the downsampled .fits files
fitsfiles = glob.glob("*_DS?*.fits") + glob.glob("*_DS??*.fits")
for fitsfile in fitsfiles:
os.remove(fitsfile)
# Remove the tmp directory (in a tmpfs mount)
try:
os.rmdir(tmpdir)
except: pass
# And finish up
job.total_time = time.time() - job.total_time
print "\nFinished"
print "UTC time is: %s"%(time.asctime(time.gmtime()))
# Write the job report
job.write_report(job.basefilenm+".report")
job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))
# Move all the important stuff to the output directory
cmd = "mv *rfifind.[bimors]* *.tgz *.ps.gz *.png *.report "+\
job.outputdir
os.system(cmd)
if __name__ == "__main__":
# Create our de-dispersion plans
# All GBNCC data have 4096 channels, but the earliest data is sampled
# at 163.84us rather than 81.92 us...
ddplans = {'4096slow':[], '4096fast':[]}
if (1):
#
# If there is <=1GB of RAM per CPU core, the following are preferred
#
# For 4096slow chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096slow'].append(dedisp_plan( 0.0, 0.02, 86, 81, 128, 1))
ddplans['4096slow'].append(dedisp_plan(139.32, 0.03, 102, 27, 128, 2))
ddplans['4096slow'].append(dedisp_plan(221.94, 0.05, 102, 33, 128, 4))
ddplans['4096slow'].append(dedisp_plan(390.24, 0.10, 102, 11, 128, 8))
# For 4096fast chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096fast'].append(dedisp_plan( 0.0, 0.01, 86, 81, 128, 1))
ddplans['4096fast'].append(dedisp_plan( 69.66, 0.02, 86, 33, 128, 2))
ddplans['4096fast'].append(dedisp_plan(126.42, 0.03, 102, 29, 128, 4))
ddplans['4096fast'].append(dedisp_plan(215.16, 0.05, 102, 33, 128, 8))
ddplans['4096fast'].append(dedisp_plan(383.46, 0.10, 102, 12, 128, 16))
else:
# If there is >2GB of RAM per CPU core, the following are preferred
#
# For 4096slow chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096slow'].append(dedisp_plan( 0.0, 0.02, 172, 41, 256, 1))
ddplans['4096slow'].append(dedisp_plan(141.04, 0.03, 204, 14, 256, 2))
ddplans['4096slow'].append(dedisp_plan(226.72, 0.05, 204, 16, 256, 4))
ddplans['4096slow'].append(dedisp_plan(389.92, 0.10, 204, 6, 256, 8))
# For 4096fast chan data: lodm dmstep dms/call #calls #subs downsamp
ddplans['4096fast'].append(dedisp_plan( 0.0, 0.01, 172, 41, 256, 1))
ddplans['4096fast'].append(dedisp_plan( 70.52, 0.02, 172, 16, 256, 2))
ddplans['4096fast'].append(dedisp_plan(125.56, 0.03, 204, 15, 256, 4))
ddplans['4096fast'].append(dedisp_plan(217.36, 0.05, 204, 17, 256, 8))
ddplans['4096fast'].append(dedisp_plan(390.76, 0.10, 204, 6, 256, 16))
# Arguments to the search program are
# sys.argv[1] = PSRFITS file name
# sys.argv[2] = working directory name
if len(sys.argv) >= 3:
workdir = sys.argv[2]
fits_filenm = sys.argv[1]
main(fits_filenm, workdir, ddplans)
elif len(sys.argv) == 2:
fits_filenm = sys.argv[1]
main(fits_filenm, '.', ddplans)
else:
print "GBNCC_search.py fits_filenm [workdir]"
|
pscholz/presto
|
bin/GBNCC_search.py
|
Python
|
gpl-2.0
| 27,691
|
[
"Gaussian"
] |
df661539f62fdb1e135a1383d079180f04b53d8fc36175743416082cc18a9dd1
|
""" This module contain solvers for all kinds of equations:
- algebraic, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- transcendental, use tsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from sympy.core.sympify import sympify
from sympy.core import S, Mul, Add, Pow, Symbol, Wild, Equality, Dummy
from sympy.core.numbers import ilcm
from sympy.functions import log, exp, LambertW
from sympy.simplify import simplify, collect
from sympy.matrices import Matrix, zeros
from sympy.polys import roots, cancel
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.utilities import any, all
from sympy.utilities.iterables import iff
from sympy.utilities.lambdify import lambdify
from sympy.mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import reduce_inequalities
from warnings import warn
# Codes for guess solve strategy
GS_POLY = 0
GS_RATIONAL = 1
GS_POLY_CV_1 = 2 # can be converted to a polynomial equation via the change of variable y -> x**a, a real
GS_POLY_CV_2 = 3 # can be converted to a polynomial equation multiplying on both sides by x**m
# for example, x + 1/x == 0. Multiplying by x yields x**2 + x == 0
GS_RATIONAL_CV_1 = 4 # can be converted to a rational equation via the change of variable y -> x**n
GS_PIECEWISE = 5
GS_TRANSCENDENTAL = 6
def guess_solve_strategy(expr, symbol):
"""
Tries to guess what approach should be used to solve a specific equation
Returns
=======
- -1: could not guess
- integer > 0: code representing certain type of equation. See GS_* fields
on this module for a complete list
Examples
========
>>> from sympy import Symbol, Rational
>>> from sympy.solvers.solvers import guess_solve_strategy
>>> from sympy.abc import x
>>> guess_solve_strategy(x**2 + 1, x)
0
>>> guess_solve_strategy(x**Rational(1,2) + 1, x)
2
"""
eq_type = -1
if expr.is_Add:
return max([guess_solve_strategy(i, symbol) for i in expr.args])
elif expr.is_Mul:
# check for rational functions
num, denom = expr.as_numer_denom()
if denom != 1 and denom.has(symbol):
#we have a quotient
m = max(guess_solve_strategy(num, symbol), guess_solve_strategy(denom, symbol))
if m == GS_POLY:
return GS_RATIONAL
elif m == GS_POLY_CV_1:
return GS_RATIONAL_CV_1
else:
raise NotImplementedError
else:
return max([guess_solve_strategy(i, symbol) for i in expr.args])
elif expr.is_Symbol:
return GS_POLY
elif expr.is_Pow:
if expr.exp.has(symbol):
return GS_TRANSCENDENTAL
elif not expr.exp.has(symbol) and expr.base.has(symbol):
if expr.exp.is_Integer and expr.exp > 0:
eq_type = max(eq_type, GS_POLY)
elif expr.exp.is_Integer and expr.exp < 0:
eq_type = max(eq_type, GS_POLY_CV_2)
elif expr.exp.is_Rational:
eq_type = max(eq_type, GS_POLY_CV_1)
else:
return GS_TRANSCENDENTAL
elif expr.is_Piecewise:
return GS_PIECEWISE
elif expr.is_Function and expr.has(symbol):
return GS_TRANSCENDENTAL
elif not expr.has(symbol):
return GS_POLY
return eq_type
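# Illustrative examples beyond the docstring (not in the original): with x a
# Symbol, guess_solve_strategy(x + 1/x, x) returns GS_POLY_CV_2 (3), since the
# equation becomes polynomial after multiplying through by x, while
# guess_solve_strategy(exp(x) - 1, x) returns GS_TRANSCENDENTAL (6).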
def solve(f, *symbols, **flags):
"""Solves equations and systems of equations.
Currently supported are univariate polynomial, transcendental
equations, piecewise combinations thereof and systems of linear
and polynomial equations. Input is formed as a single expression
or an equation, or an iterable container in case of an equation
system. The type of output may vary and depends heavily on the
input. For more details refer to more problem specific functions.
By default all solutions are simplified to make the output more
readable. If this is not the expected behavior (e.g., because of
speed issues) set simplified=False in function arguments.
To solve equations and systems of equations like recurrence relations
or differential equations, use rsolve() or dsolve(), respectively.
>>> from sympy import I, solve
>>> from sympy.abc import x, y
Solve a polynomial equation:
>>> solve(x**4-1, x)
[1, -1, -I, I]
Solve a linear system:
>>> solve((x+5*y-2, -3*x+6*y-15), x, y)
{x: -3, y: 1}
"""
def sympit(w):
return map(sympify, iff(isinstance(w,(list, tuple, set)), w, [w]))
# make f and symbols into lists of sympified quantities
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
bare_f = not isinstance(f, (list, tuple, set))
f, symbols = (sympit(w) for w in [f, symbols])
if any(isinstance(fi, bool) or (fi.is_Relational and not fi.is_Equality) for fi in f):
return reduce_inequalities(f, assume=flags.get('assume'))
for i, fi in enumerate(f):
if fi.is_Equality:
f[i] = fi.lhs - fi.rhs
if not symbols:
#get symbols from equations or supply dummy symbols since
#solve(3,x) returns []...though it seems that it should raise some sort of error TODO
symbols = set([])
for fi in f:
symbols |= fi.atoms(Symbol) or set([Dummy('x')])
symbols = list(symbols)
if bare_f:
f=f[0]
if len(symbols) == 1:
if isinstance(symbols[0], (list, tuple, set)):
symbols = symbols[0]
result = list()
# Begin code handling for Function and Derivative instances
# Basic idea: store all the passed symbols in symbols_passed, check to see
# if any of them are Function or Derivative types, if so, use a dummy
# symbol in their place, and set symbol_swapped = True so that other parts
# of the code can be aware of the swap. Once all swapping is done, then
# continue on with regular solving as usual, and swap back at the end of
# the routine, so that whatever was passed in symbols is what is returned.
symbols_new = []
symbol_swapped = False
symbols_passed = list(symbols)
for i, s in enumerate(symbols):
if s.is_Symbol:
s_new = s
elif s.is_Function:
symbol_swapped = True
s_new = Dummy('F%d' % i)
elif s.is_Derivative:
symbol_swapped = True
s_new = Dummy('D%d' % i)
else:
raise TypeError('not a Symbol, a Function, or a Derivative')
symbols_new.append(s_new)
if symbol_swapped:
swap_back_dict = dict(zip(symbols_new, symbols))
# End code for handling of Function and Derivative instances
if not isinstance(f, (tuple, list, set)):
# Create a swap dictionary for storing the passed symbols to be solved
# for, so that they may be swapped back.
if symbol_swapped:
swap_dict = zip(symbols, symbols_new)
f = f.subs(swap_dict)
symbols = symbols_new
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
f = piecewise_fold(f)
if len(symbols) != 1:
result = {}
for s in symbols:
result[s] = solve(f, s, **flags)
if flags.get('simplified', True):
for s, r in result.items():
result[s] = map(simplify, r)
return result
symbol = symbols[0]
strategy = guess_solve_strategy(f, symbol)
if strategy == GS_POLY:
poly = f.as_poly( symbol )
if poly is None:
raise NotImplementedError("Cannot solve equation " + str(f) + " for "
+ str(symbol))
# for cubics and quartics, if the flag wasn't set, DON'T do it
# by default since the results are quite long. Perhaps one could
# base this decision on a certain critical length of the roots.
if poly.degree > 2:
flags['simplified'] = flags.get('simplified', False)
result = roots(poly, cubics=True, quartics=True).keys()
elif strategy == GS_RATIONAL:
P, Q = f.as_numer_denom()
#TODO: check for Q != 0
result = solve(P, symbol, **flags)
elif strategy == GS_POLY_CV_1:
args = list(f.args)
if isinstance(f, Add):
# we must search for a suitable change of variable
# collect exponents
exponents_denom = list()
for arg in args:
if isinstance(arg, Pow):
exponents_denom.append(arg.exp.q)
elif isinstance(arg, Mul):
for mul_arg in arg.args:
if isinstance(mul_arg, Pow):
exponents_denom.append(mul_arg.exp.q)
assert len(exponents_denom) > 0
if len(exponents_denom) == 1:
m = exponents_denom[0]
else:
# get the LCM of the denominators
m = reduce(ilcm, exponents_denom)
# x -> y**m.
# we assume positive for simplification purposes
t = Dummy('t', positive=True)
f_ = f.subs(symbol, t**m)
if guess_solve_strategy(f_, t) != GS_POLY:
raise NotImplementedError("Could not convert to a polynomial equation: %s" % f_)
cv_sols = solve(f_, t)
for sol in cv_sols:
result.append(sol**m)
elif isinstance(f, Mul):
for mul_arg in args:
result.extend(solve(mul_arg, symbol))
elif strategy == GS_POLY_CV_2:
m = 0
args = list(f.args)
if isinstance(f, Add):
for arg in args:
if isinstance(arg, Pow):
m = min(m, arg.exp)
elif isinstance(arg, Mul):
for mul_arg in arg.args:
if isinstance(mul_arg, Pow):
m = min(m, mul_arg.exp)
elif isinstance(f, Mul):
for mul_arg in args:
if isinstance(mul_arg, Pow):
m = min(m, mul_arg.exp)
f1 = simplify(f*symbol**(-m))
result = solve(f1, symbol)
# TODO: we might have introduced unwanted solutions
# when multiplied by x**-m
elif strategy == GS_PIECEWISE:
result = set()
for expr, cond in f.args:
candidates = solve(expr, *symbols)
if isinstance(cond, bool) or cond.is_Number:
if not cond:
continue
# Only include solutions that do not match the condition
# of any of the other pieces.
for candidate in candidates:
matches_other_piece = False
for other_expr, other_cond in f.args:
if isinstance(other_cond, bool) \
or other_cond.is_Number:
continue
if bool(other_cond.subs(symbol, candidate)):
matches_other_piece = True
break
if not matches_other_piece:
result.add(candidate)
else:
for candidate in candidates:
if bool(cond.subs(symbol, candidate)):
result.add(candidate)
result = list(result)
elif strategy == GS_TRANSCENDENTAL:
#a, b = f.as_numer_denom()
# Let's throw away the denominator for now. When we have robust
# assumptions, it should be checked, that for the solution,
# b!=0.
result = tsolve(f, *symbols)
elif strategy == -1:
raise ValueError('Could not parse expression %s' % f)
else:
raise NotImplementedError("No algorithms are implemented to solve equation %s" % f)
# This symbol swap should not be necessary for the single-symbol case: if
# you've solved for the symbol then it will not appear in the solution. Right
# now, however, ODEs are getting solutions from solve (even though they
# shouldn't be -- see the swap_back test in test_solvers).
if symbol_swapped:
result = [ri.subs(swap_back_dict) for ri in result]
if flags.get('simplified', True) and strategy != GS_RATIONAL:
return map(simplify, result)
else:
return result
else:
if not f:
return {}
else:
# Create a swap dictionary for storing the passed symbols to be
# solved for, so that they may be swapped back.
if symbol_swapped:
swap_dict = zip(symbols, symbols_new)
f = [fi.subs(swap_dict) for fi in f]
symbols = symbols_new
polys = []
for g in f:
poly = g.as_poly(*symbols)
if poly is not None:
polys.append(poly)
else:
raise NotImplementedError()
if all(p.is_linear for p in polys):
n, m = len(f), len(symbols)
matrix = zeros((n, m + 1))
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = list(monom).index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
soln = solve_linear_system(matrix, *symbols, **flags)
else:
soln = solve_poly_system(polys)
# Use swap_dict to ensure we return the same type as what was
# passed
if symbol_swapped:
if isinstance(soln, dict):
res = {}
for k in soln.keys():
res.update({swap_back_dict[k]: soln[k]})
return res
else:
return soln
else:
return soln
def solve_linear(lhs, rhs=0, x=[], exclude=[]):
""" Return a tuple containing derived from f = lhs - rhs that is either:
(numerator, denominator) of f; if this comes back as (0, 1) it means
that f was actually zero even though it may have had symbols:
e.g. y*cos(x)**2 + y*sin(x)**2 - y = y*(0) = 0 If the numerator
is not zero then the function is guaranteed not to be zero.
or
(symbol, solution) where symbol appears linearly in the numerator of f,
is in x (if given) and is not in exclude (if given).
No simplification is done to f other than and mul=True expansion, so
the solution will correspond strictly to a unique solution.
Examples:
>>> from sympy.solvers.solvers import solve_linear
>>> from sympy.abc import x, y, z
These are linear in x and 1/x:
>>> solve_linear(x + y**2)
(x, -y**2)
>>> solve_linear(1/x - y**2)
(x, y**(-2))
When not linear in x or y then the numerator and denominator are returned.
>>> solve_linear(x**2/y**2 - 3)
(x**2 - 3*y**2, y**2)
If x is allowed to cancel, then this appears linear, but this sort of
cancellation is not done so the solution will always satisfy the original
expression without causing a division by zero error.
>>> solve_linear(x**2*(1/x - z**2/x))
(x**2*(x - x*z**2), x**2)
You can give a list of what you prefer for x candidates:
>>> solve_linear(x + y + z, x=[y])
(y, -x - z)
You can also indicate what variables you don't want to consider:
>>> solve_linear(x + y + z, exclude=[x, z])
(y, -x - z)
If only x was excluded then a solution for y or z might be obtained.
"""
from sympy import expand_mul, Equality
if isinstance(lhs, Equality):
rhs += lhs.rhs
lhs = lhs.lhs
n, d = (lhs - rhs).as_numer_denom()
ex = expand_mul(n)
if not ex:
return ex, d
exclude = set(exclude)
syms = ex.free_symbols
if not x:
x = syms
else:
x = syms.intersection(x)
x = x.difference(exclude)
for xi in x:
dn = n.diff(xi)
# if not dn then this is a pseudo-function of xi
if dn and not dn.has(xi):
return xi, -(n.subs(xi, 0))/dn
return n, d
def solve_linear_system(system, *symbols, **flags):
"""Solve system of N linear equations with M variables, which means
both Cramer and over defined systems are supported. The possible
number of solutions is zero, one or infinite. Respectively this
procedure will return None or dictionary with solutions. In the
case of over defined system all arbitrary parameters are skipped.
This may cause situation in with empty dictionary is returned.
In this case it means all symbols can be assigned arbitrary values.
Input to this functions is a Nx(M+1) matrix, which means it has
to be in augmented form. If you are unhappy with such setting
use 'solve' method instead, where you can input equations
explicitly. And don't worry about the matrix, this function
is persistent and will make a local copy of it.
The algorithm used here is fraction free Gaussian elimination,
which results, after elimination, in upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
>>> from sympy import Matrix, solve_linear_system
>>> from sympy.abc import x, y
Solve the following system:
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
"""
matrix = system[:,:]
syms = list(symbols)
i, m = 0, matrix.cols-1 # don't count augmentation
while i < matrix.rows:
if i == m:
# an overdetermined system
if any(matrix[i:,m]):
return None # no solutions
else:
# remove trailing rows
matrix = matrix[:i,:]
break
if not matrix[i, i]:
# there is no pivot in current column
# so try to find one in other columns
for k in xrange(i+1, m):
if matrix[i, k]:
break
else:
if matrix[i, m]:
return None # no solutions
else:
# zero row or was a linear combination of
# other rows so now we can safely skip it
matrix.row_del(i)
continue
# we want to change the order of columns so
# the order of variables must also change
syms[i], syms[k] = syms[k], syms[i]
matrix.col_swap(i, k)
pivot_inv = S.One / matrix[i, i]
# divide all elements in the current row by the pivot
matrix.row(i, lambda x, _: x * pivot_inv)
for k in xrange(i+1, matrix.rows):
if matrix[k, i]:
coeff = matrix[k, i]
# subtract from the current row the row containing
# pivot and multiplied by extracted coefficient
matrix.row(k, lambda x, j: simplify(x - matrix[i, j]*coeff))
i += 1
# if there weren't any problems, augmented matrix is now
# in row-echelon form so we can check how many solutions
# there are and extract them using back substitution
simplified = flags.get('simplified', True)
if len(syms) == matrix.rows:
# this system is Cramer equivalent so there is
# exactly one solution to this system of equations
k, solutions = i-1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in xrange(k+1, m):
content -= matrix[k, j]*solutions[syms[j]]
if simplified:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
elif len(syms) > matrix.rows:
# this system will have an infinite number of solutions
# dependent on exactly len(syms) - i parameters
k, solutions = i-1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in xrange(k+1, i):
content -= matrix[k, j]*solutions[syms[j]]
# run back-substitution for parameters
for j in xrange(i, m):
content -= matrix[k, j]*syms[j]
if simplified:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
else:
return None # no solutions
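# Illustrative sketch (not part of the original module): an underdetermined
# system keeps the free symbols as parameters in the returned values.
#
#     from sympy import Matrix
#     from sympy.abc import x, y
#     # x + y == 2, one equation in two unknowns:
#     solve_linear_system(Matrix(((1, 1, 2),)), x, y)   # -> {x: 2 - y}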
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
"""Solve equation of a type p(x; a_1, ..., a_k) == q(x) where both
p, q are univariate polynomials and f depends on k parameters.
The result of this functions is a dictionary with symbolic
values of those parameters with respect to coefficients in q.
This functions accepts both Equations class instances and ordinary
SymPy expressions. Specification of parameters and variable is
obligatory for efficiency and simplicity reason.
>>> from sympy import Eq
>>> from sympy.abc import a, b, c, x
>>> from sympy.solvers import solve_undetermined_coeffs
>>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
{a: 1/c, b: -1/c}
"""
if isinstance(equ, Equality):
# got equation, so move all the
# terms to the left hand side
equ = equ.lhs - equ.rhs
equ = cancel(equ).as_numer_denom()[0]
system = collect(equ.expand(), sym, evaluate=False).values()
if not any([ equ.has(sym) for equ in system ]):
# consecutive powers in the input expressions have
# been successfully collected, so solve remaining
# system using Gaussian elimination algorithm
return solve(system, *coeffs, **flags)
else:
return None # no solutions
def solve_linear_system_LU(matrix, syms):
""" LU function works for invertible only """
assert matrix.rows == matrix.cols-1
A = matrix[:matrix.rows,:matrix.rows]
b = matrix[:,matrix.cols-1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i,0]
return solutions
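# Usage sketch (not part of the original module; assumes the coefficient
# block of the augmented matrix is square and invertible):
#
#     from sympy import Matrix
#     from sympy.abc import x, y
#     solve_linear_system_LU(Matrix(((1, 4, 2), (-2, 1, 14))), [x, y])
#     # -> {x: -6, y: 2}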
x = Dummy('x')
a,b,c,d,e,f,g,h = [Wild(t, exclude=[x]) for t in 'abcdefgh']
patterns = None
def _generate_patterns():
"""
Generates patterns for transcendental equations.
This is lazily calculated (called) in the tsolve() function and stored in
the patterns global variable.
"""
tmp1 = f ** (h-(c*g/b))
tmp2 = (-e*tmp1/a)**(1/d)
global patterns
patterns = [
(a*(b*x+c)**d + e , ((-(e/a))**(1/d)-c)/b),
( b+c*exp(d*x+e) , (log(-b/c)-e)/d),
(a*x+b+c*exp(d*x+e) , -b/a-LambertW(c*d*exp(e-b*d/a)/a)/d),
( b+c*f**(d*x+e) , (log(-b/c)-e*log(f))/d/log(f)),
(a*x+b+c*f**(d*x+e) , -b/a-LambertW(c*d*f**(e-b*d/a)*log(f)/a)/d/log(f)),
( b+c*log(d*x+e) , (exp(-b/c)-e)/d),
(a*x+b+c*log(d*x+e) , -e/d+c/a*LambertW(a/c/d*exp(-b/c+a*e/c/d))),
(a*(b*x+c)**d + e*f**(g*x+h) , -c/b-d*LambertW(-tmp2*g*log(f)/b/d)/g/log(f))
]
def tsolve(eq, sym):
"""
Solves a transcendental equation with respect to the given
symbol. Various equations containing mixed linear terms, powers,
and logarithms can be solved.
Only a single solution is returned. This solution is generally
not unique. In some cases, a complex solution may be returned
even though a real solution exists.
>>> from sympy import tsolve, log
>>> from sympy.abc import x
>>> tsolve(3**(2*x+5)-4, x)
[(-5*log(3) + log(4))/(2*log(3))]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if patterns is None:
_generate_patterns()
eq = sympify(eq)
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
sym = sympify(sym)
eq2 = eq.subs(sym, x)
# First see if the equation has a linear factor
# In that case, the other factor can contain x in any way (as long as it
# is finite), and we have a direct solution to which we add others that
# may be found for the remaining portion.
r = Wild('r')
m = eq2.match((a*x+b)*r)
if m and m[a]:
return [(-b/a).subs(m).subs(x, sym)] + solve(m[r], x)
for p, sol in patterns:
m = eq2.match(p)
if m:
return [sol.subs(m).subs(x, sym)]
# let's also try to invert the equation
lhs = eq
rhs = S.Zero
while True:
indep, dep = lhs.as_independent(sym)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep is S.Zero:
break
lhs = dep
rhs -= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
rhs /= indep
# invert: f(x) = g  ->  x = f**(-1)(g)
if lhs.is_Function and lhs.nargs==1 and hasattr(lhs, 'inverse'):
rhs = lhs.inverse()(rhs)
lhs = lhs.args[0]
sol = solve(lhs-rhs, sym)
return sol
elif lhs.is_Add:
# just a simple case - we do variable substitution for first function,
# and if it removes all functions - let's call solve.
# use case: exp(x) + exp(-x) = y  ->  t + 1/t = y
t = Dummy('t')
terms = lhs.args
# find first term which is Function
for f1 in lhs.args:
if f1.is_Function:
break
else:
raise NotImplementedError("Unable to solve the equation " + \
"(tsolve: at least one Function expected at this point)")
# perform the substitution
lhs_ = lhs.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not (lhs_.is_Function or
any(term.is_Function for term in lhs_.args)):
cv_sols = solve(lhs_ - rhs, t)
for sol in cv_sols:
if sol.has(sym):
raise NotImplementedError("Unable to solve the equation")
cv_inv = solve( t - f1, sym )[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
return sols
raise NotImplementedError("Unable to solve the equation.")
def msolve(*args, **kwargs):
"""
Compatibility wrapper pointing to nsolve().
msolve() has been renamed to nsolve(), please use nsolve() directly."""
warn('msolve() has been renamed, please use nsolve() instead',
DeprecationWarning)
# args is a tuple, so build a list before swapping the first two entries
args = list(args)
args[0], args[1] = args[1], args[0]
return nsolve(*args, **kwargs)
# TODO: option for calculating J numerically
def nsolve(*args, **kwargs):
"""
Solve a nonlinear equation system numerically.
nsolve(f, [args,] x0, modules=['mpmath'], **kwargs)
f is a vector function of symbolic expressions representing the system.
args are the variables. If there is only one variable, this argument can be
omitted.
x0 is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to evaluate
the function and the Jacobian matrix. Make sure to use a module that
supports matrices. For more information on the syntax, please see the
docstring of lambdify.
Overdetermined systems are supported.
>>> from sympy import Symbol, nsolve
>>> import sympy
>>> sympy.mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print nsolve((f1, f2), (x1, x2), (-1, 1))
[-1.19287309935246]
[ 1.27844411169911]
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin, nsolve
>>> from sympy.abc import x
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
mpmath.findroot is used; you can find more extensive documentation there,
especially concerning keyword parameters and available solvers.
"""
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if isinstance(f, (list, tuple)):
f = Matrix(f).T
if not isinstance(f, Matrix):
# assume it's a sympy expression
if isinstance(f, Equality):
f = f.lhs - f.rhs
f = f.evalf()
atoms = f.atoms(Symbol)
if fargs is None:
fargs = atoms.copy().pop()
if not (len(atoms) == 1 and (fargs in atoms or fargs[0] in atoms)):
raise ValueError('expected a one-dimensional and numerical function')
# the function is much better behaved if there is no denominator
f = f.as_numer_denom()[0]
f = lambdify(fargs, f, modules)
return findroot(f, x0, **kwargs)
if len(fargs) > f.cols:
raise NotImplementedError('need at least as many equations as variables')
verbose = kwargs.get('verbose', False)
if verbose:
print 'f(x):'
print f
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print 'J(x):'
print J
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
return x
|
pernici/sympy
|
sympy/solvers/solvers.py
|
Python
|
bsd-3-clause
| 31,233
|
[
"Gaussian"
] |
2955a87d5f892fbdc8908e3f3003130a060c29f785ebe71293e7170091675dcb
|
# Import modules
try:
import dryscrape
except ImportError:
exit('Unable to import dryscrape.')
try:
from bs4 import BeautifulSoup
except ImportError:
exit('Unable to import BeautifulSoup from bs4.')
# Lyrics functions
class lyrics:
@staticmethod
def get(artist, title):
artist = artist.replace(' ', '_')
title = title.replace(' ', '_')
session = dryscrape.Session()
try:
session.visit('http://lyrics.wikia.com/wiki/' + artist + '%3A' + title)
except Exception:
exit('Lyrically - Couldn\'t connect to Wikia')
page_html = session.body()
soup = BeautifulSoup(page_html, 'lxml')
try:
lyrics_html = soup.findAll('div', {'class' : 'lyricbox'})[0]
except IndexError:
exit('Lyrically - You can only use lyrics.get once per session.')
lyrics_html = str(lyrics_html)
# strip the opening <div class="lyricbox"> tag and the trailing markup
lyrics_html = lyrics_html[22:][:-38]
lyrics_html = lyrics_html.replace('</div>', '')
lyrics_html = lyrics_html.replace('<br/>', '\n')
return(lyrics_html)
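# Usage sketch (hedged; performs a live network request against
# lyrics.wikia.com and requires a working dryscrape/webkit setup):
#
#     from lyrically import lyrics
#     text = lyrics.get('Daft Punk', 'Around The World')
#     print(text)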
|
Wingysam/lyrically
|
lyrically/__init__.py
|
Python
|
gpl-3.0
| 1,033
|
[
"VisIt"
] |
28f5d6b8816f90176b43aef60151b9a8fa49cfa76cbfea0746fa52eece06f6ed
|
import py.path
from _pytest.config import Config, PytestPluginManager, default_plugins
try:
# pytest >= 3.4
from _pytest.nodes import FSCollector
except ImportError:
# pytest < 3.4
from _pytest.main import FSCollector
class GraftedSubSession(FSCollector):
"""
Collects test files from outside of the file tree of the current pytest session.
By default, pytest will only collect files from the directories with which it was
invoked (or the current working directory, if none).
With this, a "sub-session" may be collected from outside of the scope of the pytest
session, e.g., in a third-party package.
Args:
name (str): The name of this grafted session.
parent (Collector): The parent pytest collector.
fspath (str): The directory from which files should be collected.
"""
def __init__(self, name, parent, fspath):
fspath = py.path.local(fspath)
# Get a new configuration for our path, which may be outside of the
# scope of the parent pytest session.
config = _build_config_for_path(fspath)
super(GraftedSubSession, self).__init__(fspath, parent=parent, config=config)
# Use our given name, rather than the path-based name set by :class:`FSCollector`.
self.name = name
@property
def gethookproxy(self):
return self.session.gethookproxy
@property
def _fixturemanager(self):
return self.session._fixturemanager
def reportinfo(self):
return self.fspath, None, ""
def collect(self):
self._fixturemanager.parsefactories(self)
for path in self.fspath.visit(fil=lambda x: x.check(file=1),
rec=self._recurse, bf=True, sort=True):
for f in self._collectfile(path):
yield f
def _collectfile(self, path):
ihook = self.gethookproxy(path)
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, path):
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return
ihook = self.gethookproxy(path)
ihook.pytest_collect_directory(path=path, parent=self)
return True
def _build_config_for_path(path):
"""
Builds and returns a basic test configuration rooted at the given path.
Args:
path (LocalPath): The path to the files under test.
Returns:
Config: The generated test configuration.
"""
# Find the root directory of the package containing our files.
for rootdir in path.parts(reverse=True):
if rootdir.join("setup.py").exists():
break
else:
rootdir = path
# Initialize a base configuration as pytest would.
pluginmanager = PytestPluginManager()
for spec in default_plugins:
pluginmanager.import_plugin(spec)
config = Config(pluginmanager)
# Ensure that pytest sets its root directory (``config.rootdir``) to the
# given path. If we don't, then using this configuration from outside of
# this path will confuse pytest.
args = [rootdir]
config.parse(args)
return config
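# Illustrative sketch (an assumption, not part of this module): a conftest.py
# could graft an external test tree into collection via a collection hook,
# for example:
#
#     from capybara.tests.collector import GraftedSubSession
#
#     def pytest_collect_directory(path, parent):
#         if path.basename == "grafted":
#             return GraftedSubSession("third-party", parent,
#                                      "/path/to/external/tests")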
|
elliterate/capybara.py
|
capybara/tests/collector.py
|
Python
|
mit
| 3,306
|
[
"VisIt"
] |
2acceaa9a826bee5d8cde62c1c67ee7964b2491c64cef0a277352f62297375d8
|
../../../../../../../share/pyshared/orca/scripts/apps/packagemanager/tutorialgenerator.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/packagemanager/tutorialgenerator.py
|
Python
|
gpl-3.0
| 89
|
[
"ORCA"
] |
1cb380c4a6afa07d0e246e6454c3173bec5ae427d985034bea40e8608f0fac75
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-19 18:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_patient_dob'),
]
operations = [
migrations.CreateModel(
name='Medication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('dosage', models.CharField(max_length=100)),
('reason', models.CharField(max_length=200)),
('instructions', models.CharField(max_length=1000)),
('date_prescribed', models.DateField(default=b'2016-11-16')),
],
),
migrations.CreateModel(
name='Treatment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=b'2016-11-16')),
('description', models.CharField(max_length=2000)),
],
),
migrations.CreateModel(
name='Visit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(verbose_name=b'2016-11-16')),
('notes', models.CharField(max_length=1000)),
],
),
migrations.AddField(
model_name='patient',
name='phone_number',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='patient',
name='dob',
field=models.DateField(default=b'1984-01-01'),
),
migrations.AddField(
model_name='visit',
name='patient',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='visits', to='app.Patient'),
),
migrations.AddField(
model_name='treatment',
name='patient',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='treatments', to='app.Patient'),
),
migrations.AddField(
model_name='medication',
name='patient',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='medications', to='app.Patient'),
),
]
|
kylewalters18/obulamu
|
server/app/migrations/0003_auto_20161119_1819.py
|
Python
|
mit
| 2,591
|
[
"VisIt"
] |
afe18214469e51aa96f5201986c4f7a699f5b925402eb1ed66b1f0f8ea4fa9d7
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag file_path) the list of relative file paths to consider. If
any target has at least one of the paths as a source (or input to an action or
rule) then 'Found dependency' is output, otherwise 'No dependencies' is output.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import os
import posixpath
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def __MakeRelativeTargetName(path):
"""Converts a gyp target name into a relative name. For example, the path to a
gyp file may be something like c:\foo\bar.gyp:target, this converts it to
bar.gyp.
"""
prune_path = os.getcwd()
if path.startswith(prune_path):
path = path[len(prune_path):]
# Gyp paths are always posix style.
path = path.replace('\\', '/')
if path.endswith('#target'):
path = path[0:len(path) - len('#target')]
return path
def __ExtractBasePath(target):
"""Extracts the path components of the specified gyp target path."""
last_index = target.rfind('/')
if last_index == -1:
return ''
return target[0:(last_index + 1)]
def __AddSources(sources, base_path, base_path_components, result):
"""Extracts valid sources from |sources| and adds them to |result|. Each
source file is relative to |base_path|, but may contain '..'. To make
resolving '..' easier |base_path_components| contains each of the
directories in |base_path|. Additionally each source may contain variables.
Such sources are ignored as it is assumed dependencies on them are expressed
and tracked by some other means."""
# NOTE: gyp paths are always posix style.
for source in sources:
if not len(source) or source.startswith('!!!') or source.startswith('$'):
continue
# variable expansion may lead to //.
source = source[0] + source[1:].replace('//', '/')
if source.startswith('../'):
path_components = base_path_components[:]
# Resolve relative paths.
while source.startswith('../'):
path_components.pop(len(path_components) - 1)
source = source[3:]
result.append('/'.join(path_components) + source)
continue
result.append(base_path + source)
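# Example of the resolution above (sketch): with base_path 'foo/' and
# base_path_components ['foo'], the source 'qux.cc' becomes 'foo/qux.cc',
# while '../baz.cc' pops the last component and becomes 'baz.cc'.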
def __ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
__AddSources(action['inputs'], base_path, base_path_components, results)
def __ExtractSources(target, target_dict):
base_path = posixpath.dirname(target)
base_path_components = base_path.split('/')
# Add a trailing '/' so that __AddSources() can easily build paths.
if len(base_path):
base_path += '/'
results = []
if 'sources' in target_dict:
__AddSources(target_dict['sources'], base_path, base_path_components,
results)
# Include the inputs from any actions. Any changes to these affect the
# resulting output.
if 'actions' in target_dict:
for action in target_dict['actions']:
__ExtractSourcesFromAction(action, base_path, base_path_components,
results)
if 'rules' in target_dict:
for rule in target_dict['rules']:
__ExtractSourcesFromAction(rule, base_path, base_path_components, results)
return results
class Target(object):
"""Holds information about a particular target:
sources: set of source files defined by this target. This includes inputs to
actions and rules.
deps: list of direct dependencies."""
def __init__(self):
self.sources = []
self.deps = []
def __GenerateTargets(target_list, target_dicts):
"""Generates a dictionary with the key the name of a target and the value a
Target."""
targets = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
absolute_target_name = targets_to_visit.pop()
# |absolute_target| may be an absolute path and may include #target.
# References to targets are relative, so we need to clean the name.
relative_target_name = __MakeRelativeTargetName(absolute_target_name)
if relative_target_name in targets:
continue
target = Target()
targets[relative_target_name] = target
target.sources.extend(__ExtractSources(relative_target_name,
target_dicts[absolute_target_name]))
for dep in target_dicts[absolute_target_name].get('dependencies', []):
targets[relative_target_name].deps.append(__MakeRelativeTargetName(dep))
targets_to_visit.append(dep)
return targets
def __GetFiles(params):
"""Returns the list of files to analyze, or None if none specified."""
generator_flags = params.get('generator_flags', {})
file_path = generator_flags.get('file_path', None)
if not file_path:
return None
try:
f = open(file_path, 'r')
result = []
for file_name in f:
if file_name.endswith('\n'):
file_name = file_name[0:len(file_name) - 1]
if len(file_name):
result.append(file_name)
f.close()
return result
except IOError:
print 'Unable to open file', file_path
return None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
files = __GetFiles(params)
if not files:
print 'Must specify files to analyze via file_path generator flag'
return
targets = __GenerateTargets(target_list, target_dicts)
files_set = frozenset(files)
found_in_all_sources = 0
for target_name, target in targets.iteritems():
sources = files_set.intersection(target.sources)
if len(sources):
print 'Found dependency'
return
print 'No dependencies'
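# Usage sketch (hedged; exact flags depend on the gyp version): gyp selects
# this module via the GYP_GENERATORS environment variable, and the files to
# analyze are passed through the file_path generator flag, e.g.:
#
#     GYP_GENERATORS=analyzer gyp -G file_path=/tmp/changed_files.txt foo.gyp
#
# where /tmp/changed_files.txt lists one relative file path per line; the
# script then prints either 'Found dependency' or 'No dependencies'.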
|
AOSPU/external_chromium_org_tools_gyp
|
pylib/gyp/generator/analyzer.py
|
Python
|
bsd-3-clause
| 6,991
|
[
"VisIt"
] |
d2bfc5c3059cc9f790dbfe4b3fbaac9bece73e678f5fdcb32c277a2ab270ee6c
|
# const.py
# Aaron Taylor
# Moose Abumeeiz
#
# This file stores all of the constant information for the game, such as width
# and height of the window.
WIDTH = 960
HEIGHT = 540
GRATIO = 52 # Pixels/Grid size
GRIDX, GRIDY = 154, 88
MAPX = WIDTH-104
MAPY = 82
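# Illustrative sketch (an assumption, not from the original file): grid
# coordinates could be converted to pixel coordinates with GRATIO as the
# cell size, e.g. px, py = col * GRATIO, row * GRATIO for a grid anchored
# at the window origin.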
|
ExPHAT/binding-of-isaac
|
const.py
|
Python
|
mit
| 262
|
[
"MOOSE"
] |
7801d16397e294b77d4455f174bb597ad9c13a5b40416ba6eafae483d3e3d8db
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#2012 Bruno Chareyre <bruno.chareyre@grenoble-inp.fr>
"""Example usage of a TesselationWrapper object for getting microscale quantities."""
# See Catalano2014a for the definition of micro-strain
# (http://dx.doi.org/10.1002/nag.2198 or free-access at arxiv http://arxiv.org/pdf/1304.4895.pdf)
tt=TriaxialTest()
tt.generate("test.yade")
O.load("test.yade")
O.run(100,True)
TW=TesselationWrapper()
TW.triangulate() #compute regular Delaunay triangulation, don’t construct tesselation
TW.computeVolumes() #will silently tesselate the packing, then compute volume of each Voronoi cell
TW.volume(10) #get volume associated to sphere of id 10
TW.setState(0) #store current positions internally for later use as the "0" state
O.run(100,True) #make particles move a little (let's hope they will!)
TW.setState(1) #store current positions internally in the "1" (deformed) state
#Now we can define strain by comparing states 0 and 1, and average them at the particles scale
TW.defToVtk("strain.vtk")
|
cosurgi/trunk
|
examples/tesselationwrapper/tesselationWrapper.py
|
Python
|
gpl-2.0
| 1,036
|
[
"VTK"
] |
0dfca0d0367fe2089a470e78edd3f1a32f19ce1b97673aa94e0ae51e8071f855
|
import snakemake
import abc
import re
import os
import pysam
import pyfaidx
import rnftools
class Source(object):
""" Abstract class for a genome from which read tuples are simulated.
Args:
fasta (str): File name of the genome from which reads are created (FASTA file).
reads_in_tuple (int): Number of reads in each read tuple.
rng_seed (int): Seed for simulator's random number generator.
sequences (set of int or str): FASTA sequences to extract. Sequences can be specified either by their ids, or by their names.
number_of_required_cores (int): Number of cores used by the simulator. This parameter is used to prevent running other threads or programs at the same time.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, fasta, reads_in_tuple, rng_seed, sequences, number_of_required_cores=1):
rnftools.mishmash.add_source(self)
self._rng_seed = rng_seed
self._reads_in_tuple = reads_in_tuple
self._sample = rnftools.mishmash.current_sample()
self._sample.add_source(self)
self.genome_id = len(self._sample.get_sources())
self._number_of_required_cores = number_of_required_cores
self._name = str(self.genome_id).zfill(3)
self._dir = os.path.join(self._sample.get_dir(), self._name)
self._fa0_fn = os.path.abspath(fasta)
self._fa_fn = os.path.abspath(os.path.join(self._dir, "reference.fa"))
self._fai_fn = self._fa_fn + ".fai"
self._seqs = sequences
self._fq_fn = os.path.join(self._dir, "_final_reads.fq")
self.dict_chr_ids = {}
self.dict_chr_lengths = {}
############################################################################
############################################################################
def get_dir(self):
"""Get working directory.
Returns:
str: Working directory.
"""
return self._dir
def get_genome_id(self):
"""Get genome ID.
Returns:
int: Genome ID.
"""
return self.genome_id
def get_reads_in_tuple(self):
"""Get number of entries in a read tuple.
Returns:
int: Number of reads in a read tuple.
"""
return self._reads_in_tuple
def get_number_of_required_cores(self):
"""Get number of required cores.
Returns:
int: Number of required cores.
"""
return self._number_of_required_cores
def clean(self):
"""Clean working directory.
"""
rnftools.utils.shell('rm -fR "{}"'.format(self.get_dir()))
############################################################################
############################################################################
def fa0_fn(self):
"""Get input FASTA file.
Returns:
str: Input FASTA file.
"""
return self._fa0_fn
def fa_fn(self):
"""Get output FASTA file (with selected chromosomes).
Returns:
str: Output FASTA file.
"""
return self._fa_fn
def fq_fn(self):
"""Get file name of the output FASTQ file.
Returns:
str: Output FASTQ file
"""
return self._fq_fn
def get_input(self):
"""Get list of input files (required to do simulation).
Returns:
list: List of input files
"""
raise NotImplementedError
############################################################################
############################################################################
def get_output(self):
"""Get list of output files (created during simulation).
Returns:
list: List of output files
"""
raise NotImplementedError
############################################################################
############################################################################
def create_fq(self):
"""Simulate reads.
"""
raise NotImplementedError
def create_fa(self):
"""Create a FASTA file with extracted sequences.
"""
if self._seqs is None:
os.symlink(self._fa0_fn, self._fa_fn)
else:
in_seqs = pyfaidx.Fasta(self._fa0_fn)
with open(self._fa_fn, "w+") as g:
for seq_desc in self._seqs:
x = in_seqs[seq_desc]
name, seq = x.name, str(x)
g.write(">" + name + "\n")
n = 80
seq_split = "\n".join([seq[i:i + n] for i in range(0, len(seq), n)])
g.write(seq_split + "\n")
@staticmethod
def recode_sam_reads(
sam_fn,
fastq_rnf_fo,
fai_fo,
genome_id,
number_of_read_tuples=10**9,
simulator_name=None,
allow_unmapped=False,
):
"""Transform a SAM file to RNF-compatible FASTQ.
Args:
sam_fn (str): SAM/BAM file - file name.
fastq_rnf_fo (str): Output FASTQ file - file object.
fai_fo (str): FAI index of the reference genome - file object.
genome_id (int): Genome ID for RNF.
number_of_read_tuples (int): Expected number of read tuples (to set width of read tuple id).
simulator_name (str): Name of the simulator. Used for comment in read tuple name.
allow_unmapped (bool): Allow unmapped reads.
Raises:
NotImplementedError
"""
fai_index = rnftools.utils.FaIdx(fai_fo)
# last_read_tuple_name=[]
read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
fq_creator = rnftools.rnfformat.FqCreator(
fastq_fo=fastq_rnf_fo,
read_tuple_id_width=read_tuple_id_width,
genome_id_width=2,
chr_id_width=fai_index.chr_id_width,
coor_width=fai_index.coor_width,
info_reads_in_tuple=True,
info_simulator=simulator_name,
)
# todo: check if clipping corrections are implemented correctly
cigar_reg_shift = re.compile("([0-9]+)([MDNP=X])")
# todo: handle other IUPAC codes
reverse_complement_dict = {
"A": "T",
"T": "A",
"C": "G",
"G": "C",
"N": "N",
}
read_tuple_id = 0
last_read_tuple_name = None
with pysam.AlignmentFile(
sam_fn,
check_header=False,
) as samfile:
for alignment in samfile:
if alignment.query_name != last_read_tuple_name and last_read_tuple_name is not None:
read_tuple_id += 1
last_read_tuple_name = alignment.query_name
if alignment.is_unmapped:
rnftools.utils.error(
"SAM files used for conversion should not contain unaligned segments. "
"This condition is broken by read tuple "
"'{}' in file '{}'.".format(alignment.query_name, sam_fn),
program="RNFtools",
subprogram="MIShmash",
exception=NotImplementedError,
)
if alignment.is_reverse:
direction = "R"
bases = "".join([reverse_complement_dict[nucl] for nucl in alignment.seq[::-1]])
qualities = str(alignment.qual[::-1])
else:
direction = "F"
bases = alignment.seq[:]
qualities = str(alignment.qual[:])
# todo: are chromosomes in bam sorted correctly (the same order as in FASTA)?
if fai_index.dict_chr_ids != {}:
chr_id = fai_index.dict_chr_ids[samfile.getrname(alignment.reference_id)]
else:
chr_id = "0"
left = int(alignment.reference_start) + 1
right = left - 1
for (steps, operation) in cigar_reg_shift.findall(alignment.cigarstring):
right += int(steps)
segment = rnftools.rnfformat.Segment(
genome_id=genome_id,
chr_id=chr_id,
direction=direction,
left=left,
right=right,
)
fq_creator.add_read(
read_tuple_id=read_tuple_id,
bases=bases,
qualities=qualities,
segments=[segment],
)
fq_creator.flush_read_tuple()
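# Usage sketch (hedged; the file names are hypothetical):
#
#     with open("reads.rnf.fq", "w") as fq, open("ref.fa.fai") as fai:
#         Source.recode_sam_reads(
#             sam_fn="alignments.bam",
#             fastq_rnf_fo=fq,
#             fai_fo=fai,
#             genome_id=1,
#             simulator_name="art",
#         )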
|
karel-brinda/rnftools
|
rnftools/mishmash/Source.py
|
Python
|
mit
| 8,429
|
[
"pysam"
] |
19e71e2215e84146ee5301d27286752ca1e3ed7643864610cfd30d6d21e8e207
|
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import os
from logging import Logger
from typing import Optional, Union
import networkx as nx
import numpy as np
from . import log_proc
from .graphs import create_ccsize_dict
from .. import global_params
from ..handler.basics import load_pkl2obj, chunkify, flatten_list, \
write_txt2kzip, write_obj2pkl
from ..mp import batchjob_utils as qu
from ..mp.mp_utils import start_multiprocess_imap as start_multiprocess
from ..reps.rep_helper import knossos_ml_from_ccs
from ..reps.segmentation import SegmentationDataset
from ..reps.super_segmentation_object import SuperSegmentationObject
def run_glia_splitting():
"""
Start astrocyte splitting -> generate final connected components of neuron vs.
glia SVs.
"""
cc_dict = load_pkl2obj(global_params.config.working_dir + "/glia/cc_dict_rag_graphs.pkl")
chs = chunkify(sorted(list(cc_dict.values()), key=len, reverse=True),
global_params.config.ncore_total * 2)
qu.batchjob_script(chs, "split_glia", n_cores=1, remove_jobfolder=True)
def collect_glia_sv():
"""
Collect astrocyte super voxels (as returned by astrocyte splitting) from all 'sv'
SegmentationObjects contained in 'sv' SegmentationDataset (always uses
default version as defined in config.yml).
"""
cc_dict = load_pkl2obj(global_params.config.working_dir + "/glia/cc_dict_rag.pkl")
# get single SV glia probas which were not included in the old RAG
ids_in_rag = np.concatenate(list(cc_dict.values()))
sds = SegmentationDataset("sv", working_dir=global_params.config.working_dir)
# get SSV glia splits
chs = chunkify(list(cc_dict.keys()), global_params.config['ncores_per_node'] * 10)
astrocyte_svs = np.concatenate(start_multiprocess(collect_gliaSV_helper, chs, debug=False,
nb_cpus=global_params.config['ncores_per_node']))
log_proc.info("Collected SSV glia SVs.")
# Missing SVs were sorted out by the size filter
# TODO: Decide if those should be added to the glia RAG or not
missing_ids = np.setdiff1d(sds.ids, ids_in_rag)
np.save(global_params.config.working_dir + "/glia/astrocyte_svs.npy", astrocyte_svs)
neuron_svs = np.array(list(set(sds.ids).difference(set(astrocyte_svs).union(set(missing_ids)))),
dtype=np.uint64)
assert len((set(neuron_svs).union(set(astrocyte_svs)).union(set(missing_ids))).difference(set(
sds.ids))) == 0
np.save(global_params.config.working_dir + "/glia/neuron_svs.npy", neuron_svs)
np.save(global_params.config.working_dir + "/glia/pruned_svs.npy", missing_ids)
log_proc.info("Collected whole dataset glia and neuron predictions.")
def collect_gliaSV_helper(cc_ixs):
astrocyte_svs = []
for cc_ix in cc_ixs:
sso = SuperSegmentationObject(cc_ix, working_dir=global_params.config.working_dir,
version="gliaremoval")
sso.load_attr_dict()
ad = sso.attr_dict
astrocyte_svs += list(flatten_list(ad["astrocyte_svs"]))
return np.array(astrocyte_svs, dtype=np.uint64)
def write_astrocyte_svgraph(rag: Union[nx.Graph, str], min_ssv_size: float,
log: Optional[Logger] = None):
"""
Stores astrocyte and neuron RAGs in "wd + /glia/" or "wd + /neuron/" as networkx edge list
and as knossos merge list.
Parameters
----------
rag : SV agglomeration
min_ssv_size : Bounding box diagonal in nm
log: Logger
"""
if log is None:
log = log_proc
if type(rag) is str:
assert os.path.isfile(rag), "RAG has to be given."
g = nx.read_edgelist(rag, nodetype=np.uint, delimiter=',')
else:
g = rag
# create neuron RAG by glia removal
neuron_g = g.copy()
astrocyte_svs = np.load(global_params.config.working_dir + "/glia/astrocyte_svs.npy")
for ix in astrocyte_svs:
neuron_g.remove_node(ix)
# create astrocyte rag by removing neuron sv's
astrocyte_g = g.copy()
for ix in neuron_g.nodes():
astrocyte_g.remove_node(ix)
# create dictionary with CC sizes (BBD)
log.info("Finished neuron and glia RAG, now preparing CC size dict.")
sds = SegmentationDataset("sv", working_dir=global_params.config.working_dir, cache_properties=['size'])
sv_size_dict = {}
bbs = sds.load_numpy_data('bounding_box') * sds.scaling
for ii in range(len(sds.ids)):
sv_size_dict[sds.ids[ii]] = bbs[ii]
ccsize_dict = create_ccsize_dict(g, sv_size_dict)
log.info("Finished preparation of SSV size dictionary based on bounding box diagonal of corresponding SVs.")
# add CCs with single neuron SV manually
neuron_ids = list(neuron_g.nodes())
all_neuron_ids = np.load(global_params.config.working_dir + "/glia/neuron_svs.npy")
# remove small Neuron CCs
missing_neuron_svs = set(all_neuron_ids).difference(set(neuron_ids))
if len(missing_neuron_svs) > 0:
msg = "Missing %d astrocyte CCs with one SV." % len(missing_neuron_svs)
log.error(msg)
raise ValueError(msg)
before_cnt = len(neuron_g.nodes())
for ix in neuron_ids:
if ccsize_dict[ix] < min_ssv_size:
neuron_g.remove_node(ix)
log.info("Removed %d neuron CCs because of size." % (before_cnt - len(neuron_g.nodes())))
ccs = list(nx.connected_components(neuron_g))
cnt_neuron_sv = 0
with open(global_params.config.neuron_svagg_list_path, 'w') as f:
for cc in ccs:
f.write(','.join([str(el) for el in cc]) + '\n')
cnt_neuron_sv += len(cc)
nx.write_edgelist(neuron_g, global_params.config.neuron_svgraph_path)
log.info(f"Nb neuron CCs: {len(ccs)}")
log.info(f"Nb neuron SVs: {cnt_neuron_sv}")
# add glia CCs with single SV
astrocyte_ids = list(astrocyte_g.nodes())
missing_astrocyte_svs = set(astrocyte_svs).difference(set(astrocyte_ids))
if len(missing_astrocyte_svs) > 0:
msg = "Missing %d astrocyte CCs with one SV." % len(missing_astrocyte_svs)
log.error(msg)
raise ValueError(msg)
before_cnt = len(astrocyte_g.nodes())
for ix in astrocyte_ids:
if ccsize_dict[ix] < min_ssv_size:
astrocyte_g.remove_node(ix)
log.info("Removed %d astrocyte CCs because of size." % (before_cnt - len(astrocyte_g.nodes())))
ccs = list(nx.connected_components(astrocyte_g))
total_size = 0
for n in astrocyte_g.nodes():
total_size += sds.get_segmentation_object(n).size
total_size_cmm = np.prod(sds.scaling) * total_size / 1e18
log.info("Glia RAG contains {} SVs in {} CCs ({} mm^3; {} Gvx).".format(
astrocyte_g.number_of_nodes(), len(ccs), total_size_cmm, total_size / 1e9))
with open(global_params.config.astrocyte_svagg_list_path, 'w') as f:
for cc in ccs:
f.write(','.join([str(el) for el in cc]) + '\n')
nx.write_edgelist(astrocyte_g, global_params.config.astrocyte_svgraph_path)
def transform_rag_edgelist2pkl(rag):
"""
Stores networkx graph as dictionary mapping (1) SSV IDs to lists of SV IDs
and (2) SSV IDs to subgraphs (networkx)
Parameters
----------
rag : networkx.Graph
"""
ccs = (rag.subgraph(c) for c in nx.connected_components(rag))
cc_dict_graph = {}
cc_dict = {}
for cc in ccs:
curr_cc = list(cc.nodes())
min_ix = np.min(curr_cc)
if min_ix in cc_dict:
raise ValueError('Multiple SSV IDs')
cc_dict_graph[min_ix] = cc
cc_dict[min_ix] = curr_cc
write_obj2pkl(global_params.config.working_dir + "/glia/cc_dict_rag_graphs.pkl",
cc_dict_graph)
write_obj2pkl(global_params.config.working_dir + "/glia/cc_dict_rag.pkl", cc_dict)
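# Usage sketch (hedged; the edge-list path and the size threshold are
# hypothetical):
#
#     import networkx as nx
#     rag = nx.read_edgelist("rag_edges.csv", nodetype=np.uint, delimiter=',')
#     transform_rag_edgelist2pkl(rag)    # writes the cc_dict pickles
#     run_glia_splitting()               # batch jobs per connected component
#     collect_glia_sv()                  # writes astrocyte/neuron SV arrays
#     write_astrocyte_svgraph(rag, min_ssv_size=8e3)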
|
StructuralNeurobiologyLab/SyConn
|
syconn/proc/glia_splitting.py
|
Python
|
gpl-2.0
| 7,971
|
[
"NEURON"
] |
be8e0c16f2f2cfe6c0253a16cb100f274ff1d7190fc7e166b04385b0269718a8
|
# Copyright 2009 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module allows you to control GenePop.
"""
import os
import re
import shutil
import sys
import tempfile
from Bio.Application import AbstractCommandline, _Argument
__docformat__ = "restructuredtext en"
def _gp_float(tok):
"""Gets a float from a token, if it fails, returns the string (PRIVATE)."""
try:
return float(tok)
except ValueError:
return str(tok)
def _gp_int(tok):
"""Gets a int from a token, if it fails, returns the string (PRIVATE)."""
try:
return int(tok)
except ValueError:
return str(tok)
def _read_allele_freq_table(f):
l = f.readline()
while ' --' not in l:
if l == "":
raise StopIteration
if 'No data' in l:
return None, None
l = f.readline()
alleles = [x for x in f.readline().rstrip().split(" ") if x != '']
alleles = [_gp_int(x) for x in alleles]
l = f.readline().rstrip()
table = []
while l != "":
line = [x for x in l.split(" ") if x != '']
try:
table.append(
(line[0], [_gp_float(x) for x in line[1: -1]],
_gp_int(line[-1])))
except ValueError:
table.append(
(line[0], [None] * len(alleles), 0))
l = f.readline().rstrip()
return alleles, table
def _read_table(f, funs):
table = []
l = f.readline().rstrip()
while '---' not in l:
l = f.readline().rstrip()
l = f.readline().rstrip()
while '===' not in l and '---' not in l and l != "":
toks = [x for x in l.split(" ") if x != ""]
line = []
for i in range(len(toks)):
try:
line.append(funs[i](toks[i]))
except ValueError:
line.append(toks[i]) # Could not cast
table.append(tuple(line))
l = f.readline().rstrip()
return table
def _read_triangle_matrix(f):
matrix = []
l = f.readline().rstrip()
while l != "":
matrix.append(
[_gp_float(x) for x in [y for y in l.split(" ") if y != ""]])
l = f.readline().rstrip()
return matrix
def _read_headed_triangle_matrix(f):
matrix = {}
header = f.readline().rstrip()
if '---' in header or '===' in header:
header = f.readline().rstrip()
nlines = len([x for x in header.split(' ') if x != '']) - 1
for line_pop in range(nlines):
l = f.readline().rstrip()
vals = [x for x in l.split(' ')[1:] if x != '']
clean_vals = []
for val in vals:
try:
clean_vals.append(_gp_float(val))
except ValueError:
clean_vals.append(None)
for col_pop in range(len(clean_vals)):
matrix[(line_pop + 1, col_pop)] = clean_vals[col_pop]
return matrix
def _hw_func(stream, is_locus, has_fisher=False):
l = stream.readline()
if is_locus:
hook = "Locus "
else:
hook = " Pop : "
while l != "":
if l.startswith(hook):
stream.readline()
stream.readline()
stream.readline()
table = _read_table(stream, [str, _gp_float, _gp_float, _gp_float, _gp_float, _gp_int, str])
# loci might mean pop if hook="Locus "
loci = {}
for entry in table:
if len(entry) < 3:
loci[entry[0]] = None
else:
locus, p, se, fis_wc, fis_rh, steps = entry[:-1]
if se == "-":
se = None
loci[locus] = p, se, fis_wc, fis_rh, steps
return loci
l = stream.readline()
# self.done = True
raise StopIteration
class _FileIterator(object):
"""Iterator which crawls over a stream of lines with a function (PRIVATE).
The generator function is expected to yield a tuple, while
consuming input.
"""
def __init__(self, func, fname, handle=None):
self.func = func
if handle is None:
self.stream = open(fname)
else:
# For special cases where calling code wants to
# seek into the file before starting:
self.stream = handle
self.fname = fname
self.done = False
def __iter__(self):
if self.done:
self.done = True
raise StopIteration
return self
def __next__(self):
return self.func(self)
if sys.version_info[0] < 3:
def next(self):
"""Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def __del__(self):
self.stream.close()
try:
os.remove(self.fname)
except OSError:
# Jython seems to call the iterator twice
pass
class _GenePopCommandline(AbstractCommandline):
"""Command Line Wrapper for GenePop (PRIVATE)."""
def __init__(self, genepop_dir=None, cmd='Genepop', **kwargs):
self.parameters = [
_Argument(["command"], "GenePop option to be called",
is_required=True),
_Argument(["mode"], "Should allways be batch", is_required=True),
_Argument(["input"], "Input file", is_required=True),
_Argument(["Dememorization"], "Dememorization step"),
_Argument(["BatchNumber"], "Number of MCMC batches"),
_Argument(["BatchLength"], "Length of MCMC chains"),
_Argument(["HWtests"], "Enumeration or MCMC"),
_Argument(["IsolBDstatistic"], "IBD statistic (a or e)"),
_Argument(["MinimalDistance"], "Minimal IBD distance"),
_Argument(["GeographicScale"], "Log or Linear"),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
self.set_parameter("mode", "Mode=Batch")
def set_menu(self, option_list):
"""Sets the menu option.
Example set_menu([6,1]) = get all F statistics (menu 6.1)
"""
self.set_parameter("command", "MenuOptions=" +
".".join(str(x) for x in option_list))
def set_input(self, fname):
"""Sets the input file name."""
self.set_parameter("input", "InputFile=" + fname)
class GenePopController(object):
def __init__(self, genepop_dir=None):
"""Initializes the controller.
genepop_dir is the directory where GenePop is.
The binary should be called Genepop (capital G)
"""
self.controller = _GenePopCommandline(genepop_dir)
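# Usage sketch (hedged; requires a 'Genepop' binary on the PATH and a
# GenePop-formatted input file):
#
#     ctrl = GenePopController()
#     for pop in ctrl.test_pop_hz_deficiency("sample.gen"):
#         print(pop)  # {locus: (P-val, SE, Fis-WC, Fis-RH, steps), ...}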
def _get_opts(self, dememorization, batches, iterations, enum_test=None):
opts = {}
opts["Dememorization"] = dememorization
opts["BatchNumber"] = batches
opts["BatchLength"] = iterations
if enum_test is not None:
if enum_test is True:
opts["HWtests"] = "Enumeration"
else:
opts["HWtests"] = "MCMC"
return opts
def _run_genepop(self, extensions, option, fname, opts={}):
cwd = os.getcwd()
temp_dir = tempfile.mkdtemp()
os.chdir(temp_dir)
self.controller.set_menu(option)
if os.path.isabs(fname):
self.controller.set_input(fname)
else:
self.controller.set_input(cwd + os.sep + fname)
for opt in opts:
self.controller.set_parameter(opt, opt + "=" + str(opts[opt]))
self.controller() # checks error level is zero
os.chdir(cwd)
shutil.rmtree(temp_dir)
return
def _test_pop_hz_both(self, fname, type, ext, enum_test=True,
dememorization=10000, batches=20,
iterations=5000):
"""Hardy-Weinberg test for heterozygote deficiency/excess.
Returns a population iterator containing a dictionary where
dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps).
Some loci have a None if the info is not available.
SE might be none (for enumerations).
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, type], fname, opts)
def hw_func(self):
return _hw_func(self.stream, False)
return _FileIterator(hw_func, fname + ext)
def _test_global_hz_both(self, fname, type, ext, enum_test=True,
dememorization=10000, batches=20,
iterations=5000):
"""Global Hardy-Weinberg test for heterozygote deficiency/excess.
Returns a triple with:
- A list per population containing (pop_name, P-val, SE, switches).
Some pops have a None if the info is not available.
SE might be none (for enumerations).
- A list per loci containing (locus_name, P-val, SE, switches).
Some loci have a None if the info is not available.
SE might be none (for enumerations).
- Overall results (P-val, SE, switches).
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, type], fname, opts)
def hw_pop_func(self):
return _read_table(self.stream, [str, _gp_float, _gp_float, _gp_float])
with open(fname + ext) as f1:
l = f1.readline()
while "by population" not in l:
l = f1.readline()
pop_p = _read_table(f1, [str, _gp_float, _gp_float, _gp_float])
with open(fname + ext) as f2:
l = f2.readline()
while "by locus" not in l:
l = f2.readline()
loc_p = _read_table(f2, [str, _gp_float, _gp_float, _gp_float])
with open(fname + ext) as f:
l = f.readline()
while "all locus" not in l:
l = f.readline()
f.readline()
f.readline()
f.readline()
f.readline()
l = f.readline().rstrip()
p, se, switches = tuple(_gp_float(x) for x in [y for y in l.split(" ") if y != ""])
return pop_p, loc_p, (p, se, switches)
# 1.1
def test_pop_hz_deficiency(self, fname, enum_test=True,
dememorization=10000, batches=20,
iterations=5000):
"""Hardy-Weinberg test for heterozygote deficiency.
Returns a population iterator containing a dictionary where
dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps).
Some loci have a None if the info is not available.
SE might be none (for enumerations).
"""
return self._test_pop_hz_both(fname, 1, ".D", enum_test,
dememorization, batches, iterations)
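    # Usage sketch (a hedged example; "sample.gen" is a hypothetical GenePop
    # input file, and a Genepop binary must be on the PATH):
    #
    #     ctrl = GenePopController()
    #     for pop_result in ctrl.test_pop_hz_deficiency("sample.gen"):
    #         for locus, stats in pop_result.items():
    #             if stats is not None:
    #                 p_val, se, fis_wc, fis_rh, steps = stats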
# 1.2
def test_pop_hz_excess(self, fname, enum_test=True,
dememorization=10000, batches=20,
iterations=5000):
"""Hardy-Weinberg test for heterozygote deficiency.
Returns a population iterator containing a dictionary where
dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps).
Some loci have a None if the info is not available.
SE might be none (for enumerations).
"""
return self._test_pop_hz_both(fname, 2, ".E", enum_test,
dememorization, batches, iterations)
# 1.3 P file
def test_pop_hz_prob(self, fname, ext, enum_test=False,
dememorization=10000, batches=20,
iterations=5000):
"""Hardy-Weinberg test based on probability.
Returns 2 iterators and a final tuple:
1. Returns a loci iterator containing:
- A dictionary[pop_pos]=(P-val, SE, Fis-WC, Fis-RH, steps).
Some pops have a None if the info is not available.
SE might be none (for enumerations).
- Result of Fisher's test (Chi2, deg freedom, prob).
2. Returns a population iterator containing:
- A dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps).
Some loci have a None if the info is not available.
SE might be none (for enumerations).
- Result of Fisher's test (Chi2, deg freedom, prob).
3. Final tuple (Chi2, deg freedom, prob).
"""
opts = self._get_opts(dememorization, batches, iterations, enum_test)
self._run_genepop([ext], [1, 3], fname, opts)
def hw_prob_loci_func(self):
return _hw_func(self.stream, True, True)
def hw_prob_pop_func(self):
return _hw_func(self.stream, False, True)
shutil.copyfile(fname + ".P", fname + ".P2")
return _FileIterator(hw_prob_loci_func, fname + ".P"), _FileIterator(hw_prob_pop_func, fname + ".P2")
# 1.4
def test_global_hz_deficiency(self, fname, enum_test=True,
dememorization=10000, batches=20,
iterations=5000):
"""Global Hardy-Weinberg test for heterozygote deficiency.
Returns a triple with:
        - A list per population containing (pop_name, P-val, SE, switches).
Some pops have a None if the info is not available.
SE might be none (for enumerations).
        - A list per locus containing (locus_name, P-val, SE, switches).
Some loci have a None if the info is not available.
SE might be none (for enumerations).
- Overall results (P-val, SE, switches).
"""
return self._test_global_hz_both(fname, 4, ".DG", enum_test,
dememorization, batches, iterations)
# 1.5
def test_global_hz_excess(self, fname, enum_test=True,
dememorization=10000, batches=20,
iterations=5000):
"""Global Hardy-Weinberg test for heterozygote excess.
Returns a triple with:
- A list per population containing (pop_name, P-val, SE, switches).
Some pops have a None if the info is not available.
SE might be none (for enumerations).
        - A list per locus containing (locus_name, P-val, SE, switches).
Some loci have a None if the info is not available.
SE might be none (for enumerations).
- Overall results (P-val, SE, switches)
"""
return self._test_global_hz_both(fname, 5, ".EG", enum_test,
dememorization, batches, iterations)
# 2.1
def test_ld(self, fname, dememorization=10000,
batches=20, iterations=5000):
opts = self._get_opts(dememorization, batches, iterations)
self._run_genepop([".DIS"], [2, 1], fname, opts)
def ld_pop_func(self):
            l = self.stream.readline().rstrip()
            if l == "":
                self.done = True
                raise StopIteration
            toks = [x for x in l.split(" ") if x != ""]
            pop, locus1, locus2 = toks[0], toks[1], toks[2]
            # Remember the first locus pair seen; each time it reappears the
            # output has wrapped around to the next population.
            if not hasattr(self, "start_locus1"):
                self.start_locus1, self.start_locus2 = locus1, locus2
                self.current_pop = -1
            if locus1 == self.start_locus1 and locus2 == self.start_locus2:
                self.current_pop += 1
            if toks[3] == "No":
                return self.current_pop, pop, (locus1, locus2), None
            p, se, switches = _gp_float(toks[3]), _gp_float(toks[4]), _gp_int(toks[5])
            return self.current_pop, pop, (locus1, locus2), (p, se, switches)
def ld_func(self):
l = self.stream.readline().rstrip()
if l == "":
self.done = True
raise StopIteration
toks = [x for x in l.split(" ") if x != ""]
locus1, locus2 = toks[0], toks[2]
try:
chi2, df, p = _gp_float(toks[3]), _gp_int(toks[4]), _gp_float(toks[5])
except ValueError:
return (locus1, locus2), None
return (locus1, locus2), (chi2, df, p)
f1 = open(fname + ".DIS")
l = f1.readline()
while "----" not in l:
l = f1.readline()
shutil.copyfile(fname + ".DIS", fname + ".DI2")
f2 = open(fname + ".DI2")
l = f2.readline()
while "Locus pair" not in l:
l = f2.readline()
while "----" not in l:
l = f2.readline()
return (_FileIterator(ld_pop_func, fname + ".DIS", f1),
_FileIterator(ld_func, fname + ".DI2", f2))
# 2.2
def create_contingency_tables(self, fname):
raise NotImplementedError
# 3.1 PR/GE files
def test_genic_diff_all(self, fname, dememorization=10000,
batches=20, iterations=5000):
raise NotImplementedError
# 3.2 PR2/GE2 files
def test_genic_diff_pair(self, fname, dememorization=10000,
batches=20, iterations=5000):
raise NotImplementedError
# 3.3 G files
def test_genotypic_diff_all(self, fname, dememorization=10000,
batches=20, iterations=5000):
raise NotImplementedError
# 3.4 2G2 files
def test_genotypic_diff_pair(self, fname, dememorization=10000,
batches=20, iterations=5000):
raise NotImplementedError
# 4
def estimate_nm(self, fname):
self._run_genepop(["PRI"], [4], fname)
with open(fname + ".PRI") as f:
lines = f.readlines() # Small file, it is ok
for line in lines:
m = re.search("Mean sample size: ([.0-9]+)", line)
if m is not None:
mean_sample_size = _gp_float(m.group(1))
m = re.search("Mean frequency of private alleles p\(1\)= ([.0-9]+)", line)
if m is not None:
mean_priv_alleles = _gp_float(m.group(1))
m = re.search("N=10: ([.0-9]+)", line)
if m is not None:
mig10 = _gp_float(m.group(1))
m = re.search("N=25: ([.0-9]+)", line)
if m is not None:
mig25 = _gp_float(m.group(1))
m = re.search("N=50: ([.0-9]+)", line)
if m is not None:
mig50 = _gp_float(m.group(1))
m = re.search("for size= ([.0-9]+)", line)
if m is not None:
mig_corrected = _gp_float(m.group(1))
os.remove(fname + ".PRI")
return mean_sample_size, mean_priv_alleles, mig10, mig25, mig50, mig_corrected
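    # Usage sketch (hedged; the file name is hypothetical) unpacking the six
    # values returned above:
    #
    #     (mean_size, mean_priv, nm10,
    #      nm25, nm50, nm_corrected) = ctrl.estimate_nm("sample.gen")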
# 5.1
def calc_allele_genotype_freqs(self, fname):
"""Calculates allele and genotype frequencies per locus and per sample.
Parameters:
- fname - file name
Returns tuple with 2 elements:
- Population iterator with
- population name
        - Locus dictionary with key = locus name and value a tuple of:
            - Genotype list with
              (Allele1, Allele2, observed, expected)
            - Heterozygosity counts as
              (expected homozygotes, observed homozygotes,
               expected heterozygotes, observed heterozygotes)
            - Allele frequency/Fis dictionary with allele as key and
              (count, frequency, Fis Weir & Cockerham)
            - Totals as a triple of
              (count, Fis Weir & Cockerham, Fis Robertson & Hill)
- Locus iterator with
- Locus name
- allele list
- Population list with a triple
- population name
- list of allele frequencies in the same order as allele list above
- number of genes
Will create a file called fname.INF
"""
self._run_genepop(["INF"], [5, 1], fname)
# First pass, general information
# num_loci = None
# num_pops = None
# with open(fname + ".INF") as f:
# l = f.readline()
# while (num_loci is None or num_pops is None) and l != '':
# m = re.search("Number of populations detected : ([0-9+])", l)
# if m is not None:
# num_pops = _gp_int(m.group(1))
# m = re.search("Number of loci detected : ([0-9+])", l)
# if m is not None:
# num_loci = _gp_int(m.group(1))
# l = f.readline()
def pop_parser(self):
if hasattr(self, "old_line"):
l = self.old_line
del self.old_line
else:
l = self.stream.readline()
loci_content = {}
while l != '':
l = l.rstrip()
if "Tables of allelic frequencies for each locus" in l:
return self.curr_pop, loci_content
match = re.match(".*Pop: (.+) Locus: (.+)", l)
if match is not None:
pop = match.group(1).rstrip()
locus = match.group(2)
if not hasattr(self, "first_locus"):
self.first_locus = locus
if hasattr(self, "curr_pop"):
if self.first_locus == locus:
old_pop = self.curr_pop
# self.curr_pop = pop
self.old_line = l
del self.first_locus
del self.curr_pop
return old_pop, loci_content
self.curr_pop = pop
else:
l = self.stream.readline()
continue
geno_list = []
l = self.stream.readline()
if "No data" in l:
continue
while "Genotypes Obs." not in l:
l = self.stream.readline()
while l != "\n":
m2 = re.match(" +([0-9]+) , ([0-9]+) *([0-9]+) *(.+)", l)
if m2 is not None:
geno_list.append((_gp_int(m2.group(1)),
_gp_int(m2.group(2)),
_gp_int(m2.group(3)),
_gp_float(m2.group(4))))
else:
l = self.stream.readline()
continue
l = self.stream.readline()
while "Expected number of ho" not in l:
l = self.stream.readline()
expHo = _gp_float(l[38:])
l = self.stream.readline()
obsHo = _gp_int(l[38:])
l = self.stream.readline()
expHe = _gp_float(l[38:])
l = self.stream.readline()
obsHe = _gp_int(l[38:])
l = self.stream.readline()
while "Sample count" not in l:
l = self.stream.readline()
l = self.stream.readline()
freq_fis = {}
overall_fis = None
while "----" not in l:
vals = [x for x in l.rstrip().split(' ') if x != '']
if vals[0] == "Tot":
overall_fis = (_gp_int(vals[1]),
_gp_float(vals[2]),
_gp_float(vals[3]))
else:
freq_fis[_gp_int(vals[0])] = (_gp_int(vals[1]),
_gp_float(vals[2]),
_gp_float(vals[3]))
l = self.stream.readline()
loci_content[locus] = (geno_list,
(expHo, obsHo, expHe, obsHe),
freq_fis, overall_fis)
self.done = True
raise StopIteration
def locus_parser(self):
l = self.stream.readline()
while l != "":
l = l.rstrip()
match = re.match(" Locus: (.+)", l)
if match is not None:
locus = match.group(1)
alleles, table = _read_allele_freq_table(self.stream)
return locus, alleles, table
l = self.stream.readline()
self.done = True
raise StopIteration
shutil.copyfile(fname + ".INF", fname + ".IN2")
pop_iter = _FileIterator(pop_parser, fname + ".INF")
locus_iter = _FileIterator(locus_parser, fname + ".IN2")
return (pop_iter, locus_iter)
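    # Usage sketch (hedged; "sample.gen" is hypothetical): the two iterators
    # returned above can be consumed independently.
    #
    #     pop_iter, locus_iter = ctrl.calc_allele_genotype_freqs("sample.gen")
    #     for pop_name, loci in pop_iter:
    #         ...  # loci[locus] per the docstring above
    #     for locus, alleles, table in locus_iter:
    #         ...  # per-locus allele frequencies by population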
def _calc_diversities_fis(self, fname, ext):
self._run_genepop([ext], [5, 2], fname)
with open(fname + ext) as f:
l = f.readline()
while l != "":
l = l.rstrip()
if l.startswith("Statistics per sample over all loci with at least two individuals typed"):
avg_fis = _read_table(f, [str, _gp_float, _gp_float, _gp_float])
avg_Qintra = _read_table(f, [str, _gp_float])
l = f.readline()
def fis_func(self):
l = self.stream.readline()
while l != "":
l = l.rstrip()
m = re.search("Locus: (.+)", l)
if m is not None:
locus = m.group(1)
self.stream.readline()
if "No complete" in self.stream.readline():
return locus, None
self.stream.readline()
fis_table = _read_table(self.stream, [str, _gp_float, _gp_float, _gp_float])
self.stream.readline()
avg_qinter, avg_fis = tuple(_gp_float(x) for x in
[y for y in self.stream.readline().split(" ") if y != ""])
return locus, fis_table, avg_qinter, avg_fis
l = self.stream.readline()
self.done = True
raise StopIteration
return _FileIterator(fis_func, fname + ext), avg_fis, avg_Qintra
# 5.2
def calc_diversities_fis_with_identity(self, fname):
return self._calc_diversities_fis(fname, ".DIV")
# 5.3
def calc_diversities_fis_with_size(self, fname):
raise NotImplementedError
# 6.1 Less genotype frequencies
def calc_fst_all(self, fname):
"""Executes GenePop and gets Fst/Fis/Fit (all populations)
Parameters:
- fname - file name
Returns:
- (multiLocusFis, multiLocusFst, multiLocus Fit),
- Iterator of tuples
(Locus name, Fis, Fst, Fit, Qintra, Qinter)
Will create a file called fname.FST .
This does not return the genotype frequencies.
"""
self._run_genepop([".FST"], [6, 1], fname)
with open(fname + ".FST") as f:
l = f.readline()
while l != '':
if l.startswith(' All:'):
toks = [x for x in l.rstrip().split(' ') if x != ""]
try:
allFis = _gp_float(toks[1])
except ValueError:
allFis = None
try:
allFst = _gp_float(toks[2])
except ValueError:
allFst = None
try:
allFit = _gp_float(toks[3])
except ValueError:
allFit = None
l = f.readline()
def proc(self):
if hasattr(self, "last_line"):
l = self.last_line
del self.last_line
else:
l = self.stream.readline()
locus = None
fis = None
fst = None
fit = None
qintra = None
qinter = None
while l != '':
l = l.rstrip()
if l.startswith(' Locus:'):
if locus is not None:
self.last_line = l
return locus, fis, fst, fit, qintra, qinter
else:
locus = l.split(':')[1].lstrip()
elif l.startswith('Fis^='):
fis = _gp_float(l.split(' ')[1])
elif l.startswith('Fst^='):
fst = _gp_float(l.split(' ')[1])
elif l.startswith('Fit^='):
fit = _gp_float(l.split(' ')[1])
elif l.startswith('1-Qintra^='):
qintra = _gp_float(l.split(' ')[1])
elif l.startswith('1-Qinter^='):
qinter = _gp_float(l.split(' ')[1])
return locus, fis, fst, fit, qintra, qinter
l = self.stream.readline()
if locus is not None:
return locus, fis, fst, fit, qintra, qinter
self.stream.close()
self.done = True
raise StopIteration
return (allFis, allFst, allFit), _FileIterator(proc, fname + ".FST")
# 6.2
def calc_fst_pair(self, fname):
self._run_genepop([".ST2", ".MIG"], [6, 2], fname)
with open(fname + ".ST2") as f:
l = f.readline()
while l != "":
l = l.rstrip()
if l.startswith("Estimates for all loci"):
avg_fst = _read_headed_triangle_matrix(f)
l = f.readline()
def loci_func(self):
l = self.stream.readline()
while l != "":
l = l.rstrip()
m = re.search(" Locus: (.+)", l)
if m is not None:
locus = m.group(1)
matrix = _read_headed_triangle_matrix(self.stream)
return locus, matrix
l = self.stream.readline()
self.done = True
raise StopIteration
os.remove(fname + ".MIG")
return _FileIterator(loci_func, fname + ".ST2"), avg_fst
# 6.3
def calc_rho_all(self, fname):
raise NotImplementedError
# 6.4
def calc_rho_pair(self, fname):
raise NotImplementedError
def _calc_ibd(self, fname, sub, stat="a", scale="Log", min_dist=0.00001):
"""Calculates isolation by distance statistics
"""
self._run_genepop([".GRA", ".MIG", ".ISO"], [6, sub],
fname, opts={
"MinimalDistance": min_dist,
"GeographicScale": scale,
"IsolBDstatistic": stat})
with open(fname + ".ISO") as f:
f.readline()
f.readline()
f.readline()
f.readline()
estimate = _read_triangle_matrix(f)
f.readline()
f.readline()
distance = _read_triangle_matrix(f)
f.readline()
match = re.match("a = (.+), b = (.+)", f.readline().rstrip())
a = _gp_float(match.group(1))
b = _gp_float(match.group(2))
f.readline()
f.readline()
match = re.match(" b=(.+)", f.readline().rstrip())
bb = _gp_float(match.group(1))
match = re.match(".*\[(.+) ; (.+)\]", f.readline().rstrip())
bblow = _gp_float(match.group(1))
bbhigh = _gp_float(match.group(2))
os.remove(fname + ".MIG")
os.remove(fname + ".GRA")
os.remove(fname + ".ISO")
return estimate, distance, (a, b), (bb, bblow, bbhigh)
# 6.5
def calc_ibd_diplo(self, fname, stat="a", scale="Log", min_dist=0.00001):
"""Calculates isolation by distance statistics for diploid data.
See _calc_ibd for parameter details.
Note that each pop can only have a single individual and
the individual name has to be the sample coordinates.
"""
return self._calc_ibd(fname, 5, stat, scale, min_dist)
# 6.6
def calc_ibd_haplo(self, fname, stat="a", scale="Log", min_dist=0.00001):
"""Calculates isolation by distance statistics for haploid data.
See _calc_ibd for parameter details.
Note that each pop can only have a single individual and
the individual name has to be the sample coordinates.
"""
return self._calc_ibd(fname, 6, stat, scale, min_dist)
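# A minimal usage sketch, not part of the original module: it assumes a
# Genepop binary on the PATH and a GenePop-formatted input file, here
# hypothetically named "sample.gen".
if __name__ == "__main__":
    ctrl = GenePopController()
    (all_fis, all_fst, all_fit), locus_iter = ctrl.calc_fst_all("sample.gen")
    print("Multilocus Fis/Fst/Fit:", all_fis, all_fst, all_fit)
    for locus, fis, fst, fit, qintra, qinter in locus_iter:
        print(locus, fst)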
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/PopGen/GenePop/Controller.py
|
Python
|
apache-2.0
| 32,725
|
[
"Biopython"
] |
7695dccf0c8ecddab8636d2aa58ddbbb85db3a1fcd7fa006cc6ff3f30117d896
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import py.path
import pytest
import sys
import _pytest.pytester as pytester
from _pytest.pytester import HookRecorder
from _pytest.pytester import CwdSnapshot, SysModulesSnapshot, SysPathsSnapshot
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_NOTESTSCOLLECTED
def test_make_hook_recorder(testdir):
item = testdir.getitem("def test_func(): pass")
recorder = testdir.make_hook_recorder(item.config.pluginmanager)
assert not recorder.getfailures()
pytest.xfail("internal reportrecorder tests need refactoring")
class rep(object):
excinfo = None
passed = False
failed = True
skipped = False
when = "call"
recorder.hook.pytest_runtest_logreport(report=rep)
failures = recorder.getfailures()
assert failures == [rep]
failures = recorder.getfailures()
assert failures == [rep]
class rep(object):
excinfo = None
passed = False
failed = False
skipped = True
when = "call"
rep.passed = False
rep.skipped = True
recorder.hook.pytest_runtest_logreport(report=rep)
modcol = testdir.getmodulecol("")
rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
rep.passed = False
rep.failed = True
rep.skipped = False
recorder.hook.pytest_collectreport(report=rep)
passed, skipped, failed = recorder.listoutcomes()
assert not passed and skipped and failed
numpassed, numskipped, numfailed = recorder.countoutcomes()
assert numpassed == 0
assert numskipped == 1
assert numfailed == 1
assert len(recorder.getfailedcollections()) == 1
recorder.unregister()
recorder.clear()
recorder.hook.pytest_runtest_logreport(report=rep)
pytest.raises(ValueError, "recorder.getfailures()")
def test_parseconfig(testdir):
config1 = testdir.parseconfig()
config2 = testdir.parseconfig()
assert config2 != config1
assert config1 != pytest.config
def test_testdir_runs_with_plugin(testdir):
testdir.makepyfile(
"""
pytest_plugins = "pytester"
def test_hello(testdir):
assert 1
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def test_runresult_assertion_on_xfail(testdir):
testdir.makepyfile(
"""
import pytest
pytest_plugins = "pytester"
@pytest.mark.xfail
def test_potato():
assert False
"""
)
result = testdir.runpytest()
result.assert_outcomes(xfailed=1)
assert result.ret == 0
def test_runresult_assertion_on_xpassed(testdir):
testdir.makepyfile(
"""
import pytest
pytest_plugins = "pytester"
@pytest.mark.xfail
def test_potato():
assert True
"""
)
result = testdir.runpytest()
result.assert_outcomes(xpassed=1)
assert result.ret == 0
def test_xpassed_with_strict_is_considered_a_failure(testdir):
testdir.makepyfile(
"""
import pytest
pytest_plugins = "pytester"
@pytest.mark.xfail(strict=True)
def test_potato():
assert True
"""
)
result = testdir.runpytest()
result.assert_outcomes(failed=1)
assert result.ret != 0
def make_holder():
class apiclass(object):
def pytest_xyz(self, arg):
"x"
def pytest_xyz_noarg(self):
"x"
apimod = type(os)("api")
def pytest_xyz(arg):
"x"
def pytest_xyz_noarg():
"x"
apimod.pytest_xyz = pytest_xyz
apimod.pytest_xyz_noarg = pytest_xyz_noarg
return apiclass, apimod
@pytest.mark.parametrize("holder", make_holder())
def test_hookrecorder_basic(holder):
pm = PytestPluginManager()
pm.addhooks(holder)
rec = HookRecorder(pm)
pm.hook.pytest_xyz(arg=123)
call = rec.popcall("pytest_xyz")
assert call.arg == 123
assert call._name == "pytest_xyz"
pytest.raises(pytest.fail.Exception, "rec.popcall('abc')")
pm.hook.pytest_xyz_noarg()
call = rec.popcall("pytest_xyz_noarg")
assert call._name == "pytest_xyz_noarg"
def test_makepyfile_unicode(testdir):
global unichr
try:
unichr(65)
except NameError:
unichr = chr
testdir.makepyfile(unichr(0xFFFD))
def test_makepyfile_utf8(testdir):
"""Ensure makepyfile accepts utf-8 bytes as input (#2738)"""
utf8_contents = u"""
def setup_function(function):
mixed_encoding = u'São Paulo'
""".encode(
"utf-8"
)
p = testdir.makepyfile(utf8_contents)
assert u"mixed_encoding = u'São Paulo'".encode("utf-8") in p.read("rb")
class TestInlineRunModulesCleanup(object):
def test_inline_run_test_module_not_cleaned_up(self, testdir):
test_mod = testdir.makepyfile("def test_foo(): assert True")
result = testdir.inline_run(str(test_mod))
assert result.ret == EXIT_OK
# rewrite module, now test should fail if module was re-imported
test_mod.write("def test_foo(): assert False")
result2 = testdir.inline_run(str(test_mod))
assert result2.ret == EXIT_TESTSFAILED
def spy_factory(self):
class SysModulesSnapshotSpy(object):
instances = []
def __init__(self, preserve=None):
SysModulesSnapshotSpy.instances.append(self)
self._spy_restore_count = 0
self._spy_preserve = preserve
self.__snapshot = SysModulesSnapshot(preserve=preserve)
def restore(self):
self._spy_restore_count += 1
return self.__snapshot.restore()
return SysModulesSnapshotSpy
def test_inline_run_taking_and_restoring_a_sys_modules_snapshot(
self, testdir, monkeypatch
):
spy_factory = self.spy_factory()
monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory)
original = dict(sys.modules)
testdir.syspathinsert()
testdir.makepyfile(import1="# you son of a silly person")
testdir.makepyfile(import2="# my hovercraft is full of eels")
test_mod = testdir.makepyfile(
"""
import import1
def test_foo(): import import2"""
)
testdir.inline_run(str(test_mod))
assert len(spy_factory.instances) == 1
spy = spy_factory.instances[0]
assert spy._spy_restore_count == 1
assert sys.modules == original
assert all(sys.modules[x] is original[x] for x in sys.modules)
def test_inline_run_sys_modules_snapshot_restore_preserving_modules(
self, testdir, monkeypatch
):
spy_factory = self.spy_factory()
monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory)
test_mod = testdir.makepyfile("def test_foo(): pass")
testdir.inline_run(str(test_mod))
spy = spy_factory.instances[0]
assert not spy._spy_preserve("black_knight")
assert spy._spy_preserve("zope")
assert spy._spy_preserve("zope.interface")
assert spy._spy_preserve("zopelicious")
def test_external_test_module_imports_not_cleaned_up(self, testdir):
testdir.syspathinsert()
testdir.makepyfile(imported="data = 'you son of a silly person'")
import imported
test_mod = testdir.makepyfile(
"""
def test_foo():
import imported
imported.data = 42"""
)
testdir.inline_run(str(test_mod))
assert imported.data == 42
def test_assert_outcomes_after_pytest_error(testdir):
testdir.makepyfile("def test_foo(): assert True")
result = testdir.runpytest("--unexpected-argument")
with pytest.raises(ValueError, message="Pytest terminal report not found"):
result.assert_outcomes(passed=0)
def test_cwd_snapshot(tmpdir):
foo = tmpdir.ensure("foo", dir=1)
bar = tmpdir.ensure("bar", dir=1)
foo.chdir()
snapshot = CwdSnapshot()
bar.chdir()
assert py.path.local() == bar
snapshot.restore()
assert py.path.local() == foo
class TestSysModulesSnapshot(object):
key = "my-test-module"
def test_remove_added(self):
original = dict(sys.modules)
assert self.key not in sys.modules
snapshot = SysModulesSnapshot()
sys.modules[self.key] = "something"
assert self.key in sys.modules
snapshot.restore()
assert sys.modules == original
def test_add_removed(self, monkeypatch):
assert self.key not in sys.modules
monkeypatch.setitem(sys.modules, self.key, "something")
assert self.key in sys.modules
original = dict(sys.modules)
snapshot = SysModulesSnapshot()
del sys.modules[self.key]
assert self.key not in sys.modules
snapshot.restore()
assert sys.modules == original
def test_restore_reloaded(self, monkeypatch):
assert self.key not in sys.modules
monkeypatch.setitem(sys.modules, self.key, "something")
assert self.key in sys.modules
original = dict(sys.modules)
snapshot = SysModulesSnapshot()
sys.modules[self.key] = "something else"
snapshot.restore()
assert sys.modules == original
def test_preserve_modules(self, monkeypatch):
key = [self.key + str(i) for i in range(3)]
assert not any(k in sys.modules for k in key)
for i, k in enumerate(key):
monkeypatch.setitem(sys.modules, k, "something" + str(i))
original = dict(sys.modules)
def preserve(name):
return name in (key[0], key[1], "some-other-key")
snapshot = SysModulesSnapshot(preserve=preserve)
sys.modules[key[0]] = original[key[0]] = "something else0"
sys.modules[key[1]] = original[key[1]] = "something else1"
sys.modules[key[2]] = "something else2"
snapshot.restore()
assert sys.modules == original
def test_preserve_container(self, monkeypatch):
original = dict(sys.modules)
assert self.key not in original
replacement = dict(sys.modules)
replacement[self.key] = "life of brian"
snapshot = SysModulesSnapshot()
monkeypatch.setattr(sys, "modules", replacement)
snapshot.restore()
assert sys.modules is replacement
assert sys.modules == original
@pytest.mark.parametrize("path_type", ("path", "meta_path"))
class TestSysPathsSnapshot(object):
other_path = {"path": "meta_path", "meta_path": "path"}
@staticmethod
def path(n):
return "my-dirty-little-secret-" + str(n)
def test_restore(self, monkeypatch, path_type):
other_path_type = self.other_path[path_type]
for i in range(10):
assert self.path(i) not in getattr(sys, path_type)
sys_path = [self.path(i) for i in range(6)]
monkeypatch.setattr(sys, path_type, sys_path)
original = list(sys_path)
original_other = list(getattr(sys, other_path_type))
snapshot = SysPathsSnapshot()
transformation = {"source": (0, 1, 2, 3, 4, 5), "target": (6, 2, 9, 7, 5, 8)}
assert sys_path == [self.path(x) for x in transformation["source"]]
sys_path[1] = self.path(6)
sys_path[3] = self.path(7)
sys_path.append(self.path(8))
del sys_path[4]
sys_path[3:3] = [self.path(9)]
del sys_path[0]
assert sys_path == [self.path(x) for x in transformation["target"]]
snapshot.restore()
assert getattr(sys, path_type) is sys_path
assert getattr(sys, path_type) == original
assert getattr(sys, other_path_type) == original_other
def test_preserve_container(self, monkeypatch, path_type):
other_path_type = self.other_path[path_type]
original_data = list(getattr(sys, path_type))
original_other = getattr(sys, other_path_type)
original_other_data = list(original_other)
new = []
snapshot = SysPathsSnapshot()
monkeypatch.setattr(sys, path_type, new)
snapshot.restore()
assert getattr(sys, path_type) is new
assert getattr(sys, path_type) == original_data
assert getattr(sys, other_path_type) is original_other
assert getattr(sys, other_path_type) == original_other_data
def test_testdir_subprocess(testdir):
testfile = testdir.makepyfile("def test_one(): pass")
assert testdir.runpytest_subprocess(testfile).ret == 0
def test_unicode_args(testdir):
result = testdir.runpytest("-k", u"💩")
assert result.ret == EXIT_NOTESTSCOLLECTED
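# A minimal sketch (not one of the original tests) of the snapshot pattern
# exercised above: capture state, mutate it, restore, and verify. The module
# key below is hypothetical.
def _snapshot_roundtrip_sketch():
    original = dict(sys.modules)
    snapshot = SysModulesSnapshot()
    sys.modules["my-hypothetical-module"] = "something"  # mutate
    snapshot.restore()  # undo the mutation
    assert sys.modules == original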
|
davidszotten/pytest
|
testing/test_pytester.py
|
Python
|
mit
| 12,761
|
[
"Brian"
] |
e554895112d67e33f0b270bcf32dc3f04fde783db0bb485bc8a970304690f46b
|
# -*- coding: utf-8 -*-
"""Protein Substitutions.
Protein substitutions are legacy statements defined in BEL 1.0. BEL 2.0 recommends using HGVS strings. Luckily,
the information contained in a BEL 1.0 encoding, such as :code:`p(HGNC:APP,sub(R,275,H))` can be
automatically translated to the appropriate HGVS :code:`p(HGNC:APP, var(p.Arg275His))`, assuming that all
substitutions are using the reference protein sequence for numbering and not the genomic reference.
The previous statements both produce the underlying data:
.. code-block:: python
from pybel.constants import *
{
    FUNCTION: PROTEIN,
NAMESPACE: 'HGNC',
NAME: 'APP',
VARIANTS: [
{
KIND: HGVS,
IDENTIFIER: 'p.Arg275His',
},
],
}
.. seealso::
- BEL 2.0 specification on `protein substitutions
<http://openbel.org/language/version_2.0/bel_specification_version_2.0.html#_variants_2>`_
- PyBEL module :py:class:`pybel.parser.modifiers.get_protein_substitution_language`
"""
import logging
from pyparsing import ParserElement
from pyparsing import pyparsing_common as ppc
from .constants import amino_acid
from ..utils import nest, one_of_tags
from ...constants import HGVS, KIND, PSUB_POSITION, PSUB_REFERENCE, PSUB_VARIANT
__all__ = [
"get_protein_substitution_language",
]
logger = logging.getLogger(__name__)
psub_tag = one_of_tags(tags=["sub", "substitution"], canonical_tag=HGVS, name=KIND)
def get_protein_substitution_language() -> ParserElement:
"""Build a protein substitution parser."""
parser_element = psub_tag + nest(
amino_acid(PSUB_REFERENCE),
ppc.integer(PSUB_POSITION),
amino_acid(PSUB_VARIANT),
)
parser_element.setParseAction(_handle_psub)
return parser_element
def _handle_psub(line, _, tokens):
upgraded = "p.{}{}{}".format(tokens[PSUB_REFERENCE], tokens[PSUB_POSITION], tokens[PSUB_VARIANT])
logger.log(5, "sub() in p() is deprecated: %s. Upgraded to %s", line, upgraded)
tokens[HGVS] = upgraded
del tokens[PSUB_REFERENCE]
del tokens[PSUB_POSITION]
del tokens[PSUB_VARIANT]
return tokens
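if __name__ == "__main__":
    # A minimal usage sketch, not part of the original module: parse a legacy
    # BEL 1.0 substitution and print the upgraded HGVS string. Per the module
    # docstring above, this is expected to print "p.Arg275His".
    language = get_protein_substitution_language()
    tokens = language.parseString("sub(R,275,H)")
    print(tokens[HGVS])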
|
pybel/pybel
|
src/pybel/parser/modifiers/protein_substitution.py
|
Python
|
mit
| 2,180
|
[
"Pybel"
] |
5830377ea81945d0397388211e54f4cf63d12c3cb90f2d8543c20252eb8306bd
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
__credits__ = ["Greg Caporaso", "Daniel McDonald", "Gavin Huttley",
"Rob Knight", "Doug Wendel", "Jai Ram Rideout",
"Jose Antonio Navas Molina"]
import os
from copy import copy
from glob import glob
from os.path import abspath, exists, isdir, isfile, split
from optparse import (Option, OptionParser, OptionGroup, OptionValueError,
OptionError)
from pyqi.core.interface import (Interface, InterfaceInputOption,
InterfaceOutputOption, InterfaceUsageExample)
from pyqi.core.factory import general_factory
from pyqi.core.exception import IncompetentDeveloperError
from pyqi.core.command import Parameter
class OptparseResult(InterfaceOutputOption):
def __init__(self, **kwargs):
super(OptparseResult, self).__init__(**kwargs)
def _validate_option(self):
pass
class OptparseOption(InterfaceInputOption):
"""An augmented option that expands a ``CommandIn`` into an Option"""
def __init__(self, **kwargs):
super(OptparseOption, self).__init__(**kwargs)
def _validate_option(self):
# optparse takes care of validating InputType, InputAction, and
# ShortName, so we don't need any checks here.
pass
def __str__(self):
if self.ShortName is None:
return '--%s' % self.Name
else:
return '-%s/--%s' % (self.ShortName, self.Name)
def getOptparseOption(self):
if self.Required:
# If the option doesn't already end with [REQUIRED], add it.
help_text = self.Help
if not help_text.strip().endswith('[REQUIRED]'):
help_text += ' [REQUIRED]'
if self.ShortName is None:
option = PyqiOption('--' + self.Name, type=self.Type,
action=self.Action, help=help_text)
else:
option = PyqiOption('-' + self.ShortName,
'--' + self.Name, type=self.Type,
action=self.Action, help=help_text)
else:
if self.DefaultDescription is None:
help_text = '%s [default: %%default]' % self.Help
else:
help_text = '%s [default: %s]' % (self.Help,
self.DefaultDescription)
if self.ShortName is None:
option = PyqiOption('--' + self.Name, type=self.Type,
action=self.Action, help=help_text,
default=self.Default)
else:
option = PyqiOption('-' + self.ShortName,
'--' + self.Name, type=self.Type,
action=self.Action, help=help_text,
default=self.Default)
return option
class OptparseUsageExample(InterfaceUsageExample):
"""Provide structure to a usage example"""
def _validate_usage_example(self):
if self.ShortDesc is None:
raise IncompetentDeveloperError("Must define ShortDesc")
if self.LongDesc is None:
raise IncompetentDeveloperError("Must define LongDesc")
if self.Ex is None:
raise IncompetentDeveloperError("Must define Ex")
class OptparseInterface(Interface):
"""A command line interface"""
DisallowPositionalArguments = True
HelpOnNoArguments = True
OptionalInputLine = '[] indicates optional input (order unimportant)'
RequiredInputLine = '{} indicates required input (order unimportant)'
def __init__(self, **kwargs):
super(OptparseInterface, self).__init__(**kwargs)
def _validate_usage_examples(self, usage_examples):
super(OptparseInterface, self)._validate_usage_examples(usage_examples)
if len(usage_examples) < 1:
raise IncompetentDeveloperError("There are no usage examples "
"associated with this command.")
def _the_in_validator(self, in_):
"""Validate input coming from the command line"""
if not isinstance(in_, list):
raise IncompetentDeveloperError("Unsupported input '%r'. Input "
"must be a list." % in_)
def _the_out_validator(self, out_):
"""Validate output coming from the command call"""
if not isinstance(out_, dict):
raise IncompetentDeveloperError("Unsupported result '%r'. Result "
"must be a dict." % out_)
def _input_handler(self, in_, *args, **kwargs):
"""Parses command-line input."""
required_opts = [opt for opt in self._get_inputs() if opt.Required]
optional_opts = [opt for opt in self._get_inputs() if not opt.Required]
# Build the usage and version strings
usage = self._build_usage_lines(required_opts)
version = 'Version: %prog ' + self._get_version()
# Instantiate the command line parser object
parser = OptionParser(usage=usage, version=version)
# If the command has required options and no input arguments were
# provided, print the help string.
if len(required_opts) > 0 and self.HelpOnNoArguments and len(in_) == 0:
parser.print_usage()
return parser.exit(-1)
if required_opts:
# Define an option group so all required options are grouped
# together and under a common header.
required = OptionGroup(parser, "REQUIRED options",
"The following options must be provided "
"under all circumstances.")
for ro in required_opts:
required.add_option(ro.getOptparseOption())
parser.add_option_group(required)
# Add the optional options.
for oo in optional_opts:
parser.add_option(oo.getOptparseOption())
#####
# THIS IS THE NATURAL BREAKING POINT FOR THIS FUNCTIONALITY
#####
# Parse our input.
opts, args = parser.parse_args(in_)
# If positional arguments are not allowed, and any were provided, raise
# an error.
if self.DisallowPositionalArguments and len(args) != 0:
parser.error("Positional argument detected: %s\n" % str(args[0]) +
" Be sure all parameters are identified by their option name.\n" +
" (e.g.: include the '-i' in '-i INPUT_DIR')")
# Test that all required options were provided.
if required_opts:
# dest may be different from the original option name because
# optparse converts names from dashed to underscored.
required_option_ids = [(o.dest, o.get_opt_string())
for o in required.option_list]
for required_dest, required_name in required_option_ids:
if getattr(opts, required_dest) is None:
parser.error('Required option %s omitted.' % required_name)
# Build up command input dictionary. This will be passed to
# Command.__call__ as kwargs.
self._optparse_input = opts.__dict__
cmd_input_kwargs = {}
for option in self._get_inputs():
if option.Parameter is not None:
param_name = option.getParameterName()
optparse_clean_name = \
self._get_optparse_clean_name(option.Name)
if option.Handler is None:
value = self._optparse_input[optparse_clean_name]
else:
value = option.Handler(
self._optparse_input[optparse_clean_name])
cmd_input_kwargs[param_name] = value
return cmd_input_kwargs
def _build_usage_lines(self, required_options):
""" Build the usage string from components """
line1 = 'usage: %prog [options] ' + \
'{%s}' % ' '.join(['%s %s' % (str(rp),rp.Name.upper())
for rp in required_options])
formatted_usage_examples = []
for usage_example in self._get_usage_examples():
short_description = usage_example.ShortDesc.strip(':').strip()
long_description = usage_example.LongDesc.strip(':').strip()
example = usage_example.Ex.strip()
if short_description:
formatted_usage_examples.append('%s: %s\n %s' %
(short_description,
long_description, example))
else:
formatted_usage_examples.append('%s\n %s' %
(long_description,example))
formatted_usage_examples = '\n\n'.join(formatted_usage_examples)
lines = (line1,
'', # Blank line
self.OptionalInputLine,
self.RequiredInputLine,
'', # Blank line
self.CmdInstance.LongDescription,
'', # Blank line
'Example usage: ',
'Print help message and exit',
' %prog -h\n',
formatted_usage_examples)
return '\n'.join(lines)
def _output_handler(self, results):
"""Deal with things in output if we know how"""
handled_results = {}
for output in self._get_outputs():
rk = output.Name
if output.InputName is None:
handled_results[rk] = output.Handler(rk, results[rk])
else:
optparse_clean_name = \
self._get_optparse_clean_name(output.InputName)
opt_value = self._optparse_input[optparse_clean_name]
handled_results[rk] = output.Handler(rk, results[rk],
opt_value)
return handled_results
def _get_optparse_clean_name(self, name):
# optparse converts dashes to underscores in long option names.
return name.replace('-', '_')
def optparse_factory(command_constructor, usage_examples, inputs, outputs,
version):
"""Optparse command line interface factory
command_constructor - a subclass of ``Command``
usage_examples - usage examples for using ``command_constructor`` via a
command line interface.
inputs - config ``inputs`` or a list of ``OptparseOptions``
outputs - config ``outputs`` or a list of ``OptparseResults``
version - config ``__version__`` (a version string)
"""
return general_factory(command_constructor, usage_examples, inputs,
outputs, version, OptparseInterface)
def optparse_main(interface_object, local_argv):
"""Construct and execute an interface object"""
optparse_cmd = interface_object()
result = optparse_cmd(local_argv[1:])
return 0
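# A minimal wiring sketch, not part of the original module; `MyCommand`, the
# usage examples, inputs, and outputs are hypothetical placeholders:
#
#     cmd_class = optparse_factory(MyCommand, usage_examples, inputs,
#                                  outputs, __version__)
#     sys.exit(optparse_main(cmd_class, sys.argv))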
# Definition of PyqiOption option type, a subclass of Option that contains
# specific types for filepaths and directory paths.
#
# This code was derived from PyCogent (http://www.pycogent.org) and QIIME
# (http://www.qiime.org), where it was initially developed.
#
# QIIME and PyCogent are GPL projects, but we obtained permission from the
# authors of this code to port it to pyqi (and keep it under pyqi's BSD
# license).
#
# TODO: this code needs to be refactored to better fit the pyqi framework.
# Should probably get added to the OptparseInterface class.
def check_existing_filepath(option, opt, value):
if not exists(value):
raise OptionValueError(
"option %s: file does not exist: %r" % (opt, value))
elif not isfile(value):
raise OptionValueError(
"option %s: not a regular file (can't be a directory!): %r" % (opt, value))
else:
return value
def check_existing_filepaths(option, opt, value):
paths = []
for v in value.split(','):
fps = glob(v)
if len(fps) == 0:
raise OptionValueError(
"No filepaths match pattern/name '%s'. "
"All patterns must be matched at least once." % v)
else:
paths.extend(fps)
values = []
for v in paths:
check_existing_filepath(option,opt,v)
values.append(v)
return values
def check_existing_dirpath(option, opt, value):
if not exists(value):
raise OptionValueError(
"option %s: directory does not exist: %r" % (opt, value))
elif not isdir(value):
raise OptionValueError(
"option %s: not a directory (can't be a file!): %r" % (opt, value))
else:
return value
def check_existing_dirpaths(option, opt, value):
paths = []
for v in value.split(','):
dps = glob(v)
if len(dps) == 0:
raise OptionValueError(
"No dirpaths match pattern/name '%s'."
"All patterns must be matched at least once." % v)
else:
paths.extend(dps)
values = []
for v in paths:
check_existing_dirpath(option, opt, v)
values.append(v)
return values
def check_new_filepath(option, opt, value):
if exists(value):
if isdir(value):
raise OptionValueError(
"option %s: output file exists and it is a directory: %r" %(opt,
value))
return value
def check_new_dirpath(option, opt, value):
if exists(value):
if isfile(value):
raise OptionValueError(
"option %s: output directory exists and it is a file: %r" %(opt,
value))
return value
def check_existing_path(option, opt, value):
if not exists(value):
raise OptionValueError(
"option %s: path does not exist: %r" % (opt, value))
return value
def check_new_path(option, opt, value):
return value
def check_multiple_choice(option, opt, value):
values = value.split(option.split_char)
for v in values:
if v not in option.mchoices:
choices = ",".join(map(repr, option.mchoices))
raise OptionValueError(
"option %s: invalid choice: %r (choose from %s)"
% (opt, v, choices))
return values
def check_blast_db(option, opt, value):
db_dir, db_name = split(abspath(value))
if not exists(db_dir):
raise OptionValueError(
"option %s: path does not exists: %r" % (opt, db_dir))
elif not isdir(db_dir):
raise OptionValueError(
"option %s: not a directory: %r" % (opt, db_dir))
return value
class PyqiOption(Option):
ATTRS = Option.ATTRS + ['mchoices','split_char']
TYPES = Option.TYPES + ("existing_path",
"new_path",
"existing_filepath",
"existing_filepaths",
"new_filepath",
"existing_dirpath",
"existing_dirpaths",
"new_dirpath",
"multiple_choice",
"blast_db")
TYPE_CHECKER = copy(Option.TYPE_CHECKER)
# for cases where the user specifies an existing file or directory
# as input, but it can be either a dir or a file
TYPE_CHECKER["existing_path"] = check_existing_path
# for cases where the user specifies a new file or directory
# as output, but it can be either a dir or a file
TYPE_CHECKER["new_path"] = check_new_path
# for cases where the user passes a single existing file
TYPE_CHECKER["existing_filepath"] = check_existing_filepath
# for cases where the user passes one or more existing files
# as a comma-separated list - paths are returned as a list
TYPE_CHECKER["existing_filepaths"] = check_existing_filepaths
# for cases where the user is passing a new path to be
    # created (e.g., an output file)
TYPE_CHECKER["new_filepath"] = check_new_filepath
# for cases where the user is passing an existing directory
# (e.g., containing a set of input files)
TYPE_CHECKER["existing_dirpath"] = check_existing_dirpath
# for cases where the user passes one or more existing directories
# as a comma-separated list - paths are returned as a list
TYPE_CHECKER["existing_dirpaths"] = check_existing_dirpaths
# for cases where the user is passing a new directory to be
    # created (e.g., an output dir which will contain many result files)
TYPE_CHECKER["new_dirpath"] = check_new_dirpath
# for cases where the user is passing one or more values
# as comma- or semicolon-separated list
# choices are returned as a list
TYPE_CHECKER["multiple_choice"] = check_multiple_choice
# for cases where the user is passing a blast database option
# blast_db is returned as a string
TYPE_CHECKER["blast_db"] = check_blast_db
def _check_multiple_choice(self):
if self.type == "multiple_choice":
if self.mchoices is None:
raise OptionError(
"must supply a list of mchoices for type '%s'" % self.type, self)
elif type(self.mchoices) not in (tuple, list):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.mchoices)).split("'")[1], self)
if self.split_char is None:
self.split_char = ','
elif self.mchoices is not None:
raise OptionError(
"must not supply mchoices for type %r" % self.type, self)
CHECK_METHODS = Option.CHECK_METHODS + [_check_multiple_choice]
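if __name__ == "__main__":
    # A minimal sketch, not part of the original module, showing the custom
    # option types with a plain OptionParser; the option name and choices
    # below are made up for illustration.
    parser = OptionParser(option_class=PyqiOption)
    parser.add_option("--colors", type="multiple_choice",
                      mchoices=["red", "green", "blue"])
    opts, _ = parser.parse_args(["--colors", "red,blue"])
    print(opts.colors)  # ['red', 'blue']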
|
biocore/pyqi
|
pyqi/core/interfaces/optparse/__init__.py
|
Python
|
bsd-3-clause
| 18,250
|
[
"BLAST"
] |
ef61816a67007720821c01d0f16c37d9cbfd26b455d1587cd796370144918d62
|
from collections import OrderedDict, defaultdict
import logging
import os
import math
import simtk.unit as units
from intermol.atom import Atom
from intermol.forces import *
import intermol.forces.forcefunctions as ff
from intermol.exceptions import (UnimplementedFunctional, UnsupportedFunctional,
UnimplementedSetting, UnsupportedSetting,
GromacsError, InterMolError)
from intermol.molecule import Molecule
from intermol.moleculetype import MoleculeType
from intermol.system import System
from intermol.gromacs.grofile_parser import GromacsGroParser
logger = logging.getLogger('InterMolLog')
ENGINE = 'gromacs'
def load(top_file, gro_file, include_dir=None, defines=None):
"""Load a set of GROMACS input files into a `System`.
Args:
        top_file:
gro_file:
include_dir:
defines:
Returns:
system:
"""
parser = GromacsParser(top_file, gro_file,
include_dir=include_dir, defines=defines)
return parser.read()
def save(top_file, gro_file, system):
"""Load a set of GROMACS input files into a `System`.
Args:
top_filename:
gro_file:
include_dir:
defines:
Returns:
system:
"""
parser = GromacsParser(top_file, gro_file, system)
return parser.write()
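# Example round trip (a sketch; the file names are hypothetical and require a
# valid GROMACS topology/coordinate pair on disk):
#
#     system = load("system.top", "system.gro")
#     save("converted.top", "converted.gro", system)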
def default_gromacs_include_dir():
"""Find the location where gromacs #include files are referenced from, by
searching for (1) gromacs environment variables, (2) just using the default
gromacs install location, /usr/local/gromacs/share/gromacs/top. """
if 'GMXLIB' in os.environ:
return os.environ['GMXLIB']
if 'GMXDATA' in os.environ:
return os.path.join(os.environ['GMXDATA'], 'top')
if 'GMXBIN' in os.environ:
return os.path.abspath(os.path.join(
os.environ['GMXBIN'], '..', 'share', 'gromacs', 'top'))
return '/usr/local/share/gromacs/top'
class GromacsParser(object):
"""
A class containing methods required to read in a Gromacs(4.5.4) Topology File
"""
# 'lookup_*' is the inverse dictionary typically used for writing
gromacs_combination_rules = {
'1': 'Multiply-C6C12',
'2': 'Lorentz-Berthelot',
'3': 'Multiply-Sigeps'
}
lookup_gromacs_combination_rules = dict(
(v, k) for k, v in gromacs_combination_rules.items())
gromacs_pairs = {
# First three correspond to pairtype 1, last two pairtype 2.
# Letter is arbitrary.
'1A': LjCPair,
'1B': LjSigepsPair,
'1C': LjDefaultPair,
'2A': LjqCPair,
'2B': LjqSigepsPair,
'2C': LjqDefaultPair
}
lookup_gromacs_pairs = dict((v, k) for k, v in gromacs_pairs.items())
gromacs_pair_types = dict(
(k, eval(v.__name__ + 'Type')) for k, v in gromacs_pairs.items())
gromacs_virtuals = {
'2-1': TwoVirtual,
'3-1': ThreeLinearVirtual,
'3-2': ThreeFdVirtual,
'3-3': ThreeFadVirtual,
'3-4': ThreeOutVirtual,
'4-2': FourFdnVirtual
}
lookup_gromacs_virtuals = dict((v, k) for k, v in gromacs_virtuals.items())
gromacs_virtual_types = dict(
(k, eval(v.__name__ + 'Type')) for k, v in gromacs_virtuals.items())
gromacs_bonds = {
'1': HarmonicBond,
'2': G96Bond,
'3': MorseBond,
'4': CubicBond,
'5': ConnectionBond,
'6': HarmonicPotentialBond,
'7': FeneBond
}
lookup_gromacs_bonds = dict((v, k) for k, v in gromacs_bonds.items())
gromacs_bond_types = dict(
(k, eval(v.__name__ + 'Type')) for k, v in gromacs_bonds.items())
def canonical_bond(self, params, bond, direction='into'):
"""
Args:
params:
bond:
direction:
Returns:
"""
if direction == 'into':
return bond, params
else: # currently, no bonds need to be de-canonicalized
try:
b_type = self.lookup_gromacs_bonds[bond.__class__]
except KeyError:
raise UnsupportedFunctional(bond, ENGINE)
return b_type, params
gromacs_angles = {
'1': HarmonicAngle,
'2': CosineSquaredAngle,
'3': CrossBondBondAngle,
'4': CrossBondAngleAngle,
'5': UreyBradleyAngle,
'6': QuarticAngle,
'10': RestrictedBendingAngle
}
lookup_gromacs_angles = dict((v, k) for k, v in gromacs_angles.items())
gromacs_angle_types = dict(
(k, eval(v.__name__ + 'Type')) for k, v in gromacs_angles.items())
def canonical_angle(self, params, angle, direction='into'):
"""
Args:
params:
angle:
direction:
Returns:
"""
if direction == 'into':
return angle, params
else: # currently, no angles need to be de-canonicalized
try:
a_type = self.lookup_gromacs_angles[angle.__class__]
except KeyError:
raise UnsupportedFunctional(angle, ENGINE)
return a_type, params
gromacs_dihedrals = {
# TrigDihedrals are actually used for 1, 4, and 9. Can't use lists as keys!
'1': ProperPeriodicDihedral,
'2': ImproperHarmonicDihedral,
'3': RbDihedral,
'4': ProperPeriodicDihedral,
'5': FourierDihedral,
'9': ProperPeriodicDihedral,
'10': RestrictedBendingDihedral,
'11': BendingTorsionDihedral,
'Trig': TrigDihedral
}
# have to invert manually because of canonical conversion collapse of types.
lookup_gromacs_dihedrals = {
TrigDihedral: 'Trig',
ImproperHarmonicDihedral: '2',
RbDihedral: '3',
FourierDihedral: '5',
RestrictedBendingDihedral: '10',
BendingTorsionDihedral: '11'
}
gromacs_dihedral_types = dict(
(k, eval(v.__name__ + 'Type')) for k, v in gromacs_dihedrals.items())
def canonical_dihedral(self, params, dihedral, direction='into'):
"""
We can fit everything into two types of dihedrals - dihedral_trig, and
improper harmonic. Dihedral trig is of the form
        fc0 + sum_{n=1}^{6} fcn * cos(n*x - phi)
Proper dihedrals can be stored easily in this form, since they have
only 1 n. Improper dihedrals can as well (flag as improper). RB can be
stored as well, assuming phi = 0 or 180. Fourier can also be stored. A
full dihedral trig can be decomposed into multiple proper dihedrals.
Will need to handle multiple dihedrals little differently in that we
will need to add multiple 9 dihedrals together into a single
dihedral_trig, as long as they have the same phi angle (seems to be
always the case).
Args:
params:
dihedral:
direction:
Returns:
"""
if direction == 'into':
if dihedral == ProperPeriodicDihedralType:
convertfunc = convert_dihedral_from_proper_to_trig
converted_dihedral = TrigDihedralType
elif dihedral == ProperPeriodicDihedral:
convertfunc = convert_dihedral_from_proper_to_trig
converted_dihedral = TrigDihedral
elif dihedral == RbDihedralType:
convertfunc = convert_dihedral_from_RB_to_trig
# Sign convention from psi to phi.
params['C1'] *= -1
params['C3'] *= -1
params['C5'] *= -1
converted_dihedral = TrigDihedralType
elif dihedral == RbDihedral:
convertfunc = convert_dihedral_from_RB_to_trig
# Sign convention from psi to phi.
params['C1'] *= -1
params['C3'] *= -1
params['C5'] *= -1
converted_dihedral = TrigDihedral
elif dihedral == FourierDihedralType:
convertfunc = convert_dihedral_from_fourier_to_trig
converted_dihedral = TrigDihedralType
elif dihedral == FourierDihedral:
convertfunc = convert_dihedral_from_fourier_to_trig
converted_dihedral = TrigDihedral
elif dihedral in (ImproperHarmonicDihedralType, ImproperHarmonicDihedral,
TrigDihedralType, TrigDihedral,
BendingTorsionDihedralType, BendingTorsionDihedral,
RestrictedBendingDihedralType, RestrictedBendingDihedral
):
convertfunc = convert_nothing
converted_dihedral = dihedral
else:
raise GromacsError('Unable to convert dihedral: {0}'.format(dihedral))
params = convertfunc(params)
return converted_dihedral, params
else:
if isinstance(dihedral, TrigDihedral):
if dihedral.improper:
d_type = '4'
paramlist = convert_dihedral_from_trig_to_proper(params)
else:
if (params['phi'].value_in_unit(units.degrees) in [0, 180] and
params['fc6']._value == 0):
d_type = '3'
params = convert_dihedral_from_trig_to_RB(params)
# Sign convention from phi to psi.
params['C1'] *= -1
params['C3'] *= -1
params['C5'] *= -1
paramlist = [params]
else:
# Print as proper dihedral. If one nonzero term, as a
# type 1, if multiple, type 9.
paramlist = convert_dihedral_from_trig_to_proper(params)
if len(paramlist) == 1:
d_type = '1'
else:
d_type = '9'
else:
try:
d_type = self.lookup_gromacs_dihedrals[dihedral.__class__]
except KeyError:
raise UnsupportedFunctional(dihedral, ENGINE)
paramlist = [params]
return d_type, paramlist
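    # Sketch of the 'into' direction described above (hedged; `params` is
    # assumed to be a dict of RB coefficients C0..C6 with units):
    #
    #     cls, trig_params = parser.canonical_dihedral(params, RbDihedral)
    #     # cls is TrigDihedral; trig_params now holds the fc* coefficients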
def choose_parameter_kwds_from_forces(self, entries, n_atoms, force_type,
gromacs_force):
"""Extract a force's parameters into a keyword dictionary.
Args:
entries (str): The `split()` line being parsed.
n_atoms (int): The number of atoms in the force.
force_type: The type of the force.
            gromacs_force: The GROMACS force class for this entry.
Returns:
kwds (dict): The force's parameters, e.g.
{'length': Quantity(value=0.13, unit=nanometers),
'k': ...
}
"""
n_entries = len(entries)
gromacs_force_type = gromacs_force.__base__ # what's the base class
typename = gromacs_force_type.__name__
u = self.unitvars[typename]
params = self.paramlist[typename]
kwds = dict()
if n_entries > n_atoms + 2:
for i, p in enumerate(params):
kwds[p] = float(entries[n_atoms + 1 + i]) * u[i]
        elif n_entries in (n_atoms + 1, n_atoms + 2):
            # Check to see if a force type is already defined for this force.
if isinstance(force_type, gromacs_force_type):
force_type_params = self.get_parameter_list_from_force(force_type)
# Note: for now, not passing the bonding variables.
for i, p in enumerate(params):
kwds[p] = force_type_params[i]
else:
logger.warning("No forcetype defined for: {0}".format(entries))
return kwds
paramlist = ff.build_paramlist('gromacs')
unitvars = ff.build_unitvars('gromacs', paramlist)
def create_kwds_from_entries(self, entries, force_class, offset=0):
return ff.create_kwds_from_entries(self.unitvars, self.paramlist,
entries, force_class, offset=offset)
def get_parameter_list_from_force(self, force):
return ff.get_parameter_list_from_force(force, self.paramlist)
def get_parameter_kwds_from_force(self, force):
return ff.get_parameter_kwds_from_force(
force, self.get_parameter_list_from_force, self.paramlist)
class TopMoleculeType(object):
"""Inner class to store information about a molecule type."""
def __init__(self):
self.nrexcl = -1
self.atoms = []
self.bonds = []
self.angles = []
self.dihedrals = []
self.rigidwaters = []
self.exclusions = []
self.pairs = []
self.cmaps = []
self.virtuals = defaultdict(list)
def __init__(self, top_file, gro_file, system=None, include_dir=None, defines=None):
"""Initializes the parser with all required metadata.
Args:
defines: Sets of default defines to use while parsing.
"""
self.top_filename = top_file
self.gro_file = gro_file
if not system:
system = System()
self.system = system
if include_dir is None:
include_dir = default_gromacs_include_dir()
self.include_dirs = (os.path.dirname(top_file), include_dir)
# Most of the gromacs water itp files for different forcefields,
# unless the preprocessor #define FLEXIBLE is given, don't define
# bonds between the water hydrogen and oxygens, but only give the
# constraint distances and exclusions.
self.defines = dict()
if defines is not None:
self.defines.update(defines)
def read(self):
"""Load the files into InterMol's abstract representation.
Returns:
system
"""
self.current_directive = None
self.if_stack = list()
self.else_stack = list()
self.molecule_types = OrderedDict()
self.molecules = list()
self.current_molecule_type = None
self.current_molecule = None
self.bondtypes = dict()
self.angletypes = dict()
self.dihedraltypes = dict()
self.implicittypes = dict()
self.pairtypes = dict()
self.cmaptypes = dict()
self.nonbondedtypes = dict()
# Parse the top_filename into a set of plain text, intermediate
# TopMoleculeType objects.
self.process_file(self.top_filename)
# Open the corresponding gro file and push all the information to the
# InterMol system.
self.gro = GromacsGroParser(self.gro_file)
self.gro.read()
self.system.box_vector = self.gro.box_vector
self.system.n_atoms = self.gro.positions.shape[0]
self.system.n_molecules = self.molecules
self.n_atoms_added = 0
for mol_name, mol_count in self.molecules:
if mol_name not in self.molecule_types:
raise GromacsError("Unknown molecule type: {0}".format(mol_name))
            # Grab the relevant plain text molecule type.
top_moltype = self.molecule_types[mol_name]
self.create_moleculetype(top_moltype, mol_name, mol_count)
return self.system
# =========== System writing =========== #
def write(self):
"""Write this topology in GROMACS file format.
Args:
filename: the name of the file to write out to
"""
gro = GromacsGroParser(self.gro_file)
gro.write(self.system)
with open(self.top_filename, 'w') as top:
self.write_defaults(top)
self.write_atomtypes(top)
if self.system.nonbonded_types:
self.write_nonbonded_types(top)
self.write_moleculetypes(top)
self.write_system(top)
self.write_molecules(top)
def write_defaults(self, top):
top.write('[ defaults ]\n')
top.write('; nbfunc comb-rule gen-pairs fudgeLJ fudgeQQ\n')
top.write('{0:6d} {1:6s} {2:6s} {3:8.6f} {4:8.6f}\n\n'.format(
self.system.nonbonded_function,
self.lookup_gromacs_combination_rules[self.system.combination_rule],
self.system.genpairs,
self.system.lj_correction,
self.system.coulomb_correction))
def write_atomtypes(self, top):
top.write('[ atomtypes ]\n')
top.write(';type, bondingtype, atomic_number, mass, charge, ptype, sigma, epsilon\n')
for atomtype in sorted(self.system.atomtypes.values(), key=lambda x: x.atomtype):
if atomtype.atomtype.isdigit():
atomtype.atomtype = "LMP_{0}".format(atomtype.atomtype)
if atomtype.bondtype.isdigit():
atomtype.bondtype = "LMP_{0}".format(atomtype.bondtype)
top.write('{0:<11s} {1:5s} {2:6d} {3:18.8f} {4:18.8f} {5:5s}'.format(
atomtype.atomtype,
atomtype.bondtype,
int(atomtype.atomic_number),
atomtype.mass.value_in_unit(units.atomic_mass_unit),
atomtype.charge.value_in_unit(units.elementary_charge),
atomtype.ptype))
if self.system.combination_rule == 'Multiply-C6C12':
top.write('{0:18.8e} {1:18.8e}\n'.format(
atomtype.sigma.value_in_unit(units.kilojoules_per_mole * units.nanometers**(6)),
atomtype.epsilon.value_in_unit(units.kilojoules_per_mole * units.nanometers**(12))))
elif self.system.combination_rule in ['Lorentz-Berthelot','Multiply-Sigeps']:
top.write('{0:18.8e} {1:18.8e}\n'.format(
atomtype.sigma.value_in_unit(units.nanometers),
atomtype.epsilon.value_in_unit(units.kilojoules_per_mole)))
top.write('\n')
def write_nonbonded_types(self, top):
top.write('[ nonbond_params ]\n')
top.write(';i j func sigma epsilon\n')
for nbtype in sorted(self.system.nonbonded_types.values(), key=lambda x: (x.atom1, x.atom2)):
# TODO: support for buckingham NB types
top.write('{0:6s} {1:6s} {2:3d}'.format(
nbtype.atom1, nbtype.atom2, nbtype.type))
if self.system.combination_rule == 'Multiply-C6C12':
top.write('{0:18.8e} {1:18.8e}\n'.format(
nbtype.C6.value_in_unit(units.kilojoules_per_mole * units.nanometers**(6)),
nbtype.C12.value_in_unit(units.kilojoules_per_mole * units.nanometers**(12))))
elif self.system.combination_rule in ['Lorentz-Berthelot', 'Multiply-Sigeps']:
top.write('{0:18.8e} {1:18.8e}\n'.format(
nbtype.sigma.value_in_unit(units.nanometers),
nbtype.epsilon.value_in_unit(units.kilojoules_per_mole)))
top.write('\n')
def write_moleculetypes(self, top):
for mol_name, mol_type in self.system.molecule_types.items():
self.current_molecule_type = mol_type
top.write('[ moleculetype ]\n')
# Gromacs can't handle spaces in the molecule name.
printname = mol_name
printname = printname.replace(' ', '_')
printname = printname.replace('"', '')
top.write('{0:s} {1:10d}\n\n'.format(printname, mol_type.nrexcl))
self.write_atoms(top)
if self.current_molecule_type.pair_forces:
self.write_pairs(top)
if self.current_molecule_type.bond_forces and not self.current_molecule_type.rigidwaters:
self.write_bonds(top)
if self.current_molecule_type.angle_forces and not self.current_molecule_type.rigidwaters:
self.write_angles(top)
if self.current_molecule_type.dihedral_forces:
self.write_dihedrals(top)
if self.current_molecule_type.virtual_forces:
self.write_virtual_sites(top)
if self.current_molecule_type.rigidwaters:
self.write_rigidwaters(top)
if self.current_molecule_type.exclusions:
self.write_exclusions(top)
def write_system(self, top):
top.write('[ system ]\n')
top.write('{0}\n\n'.format(self.system.name))
def write_molecules(self, top):
top.write('[ molecules ]\n')
top.write('; Compound nmols\n')
for mol_name, mol_type in self.system.molecule_types.items():
n_molecules = len(mol_type.molecules)
# Gromacs can't handle spaces or quotes in the molecule name.
printname = mol_name
printname = printname.replace(' ', '_')
printname = printname.replace('"', '')
top.write('{0:<15s} {1:8d}\n'.format(printname, n_molecules))
def write_atoms(self, top):
top.write('[ atoms ]\n')
top.write(';num, type, resnum, resname, atomname, cgnr, q, m\n')
# Start iterating the set to get the first entry (somewhat kludgy...)
for i, atom in enumerate(next(iter(self.current_molecule_type.molecules)).atoms):
if atom.name.isdigit(): # LAMMPS atom names can have digits
atom.name = "LMP_{0}".format(atom.name)
if atom.atomtype[0].isdigit():
atom.atomtype[0] = "LMP_{0}".format(atom.atomtype[0])
top.write('{0:6d} {1:18s} {2:6d} {3:8s} {4:8s} {5:6d} '
'{6:18.8f} {7:18.8f}'.format(
i + 1,
atom.atomtype[0],
atom.residue_index,
atom.residue_name,
atom.name,
atom.cgnr,
atom.charge[0].value_in_unit(units.elementary_charge),
atom.mass[0].value_in_unit(units.atomic_mass_unit)))
# Alternate states -- only one for now.
if atom.atomtype.get(1):
top.write('{0:18s} {1:18.8f} {2:18.8f}'.format(
atom.atomtype[1],
atom.charge[1].value_in_unit(units.elementary_charge),
atom.mass[1].value_in_unit(units.atomic_mass_unit)))
top.write('\n')
top.write('\n')
def write_pairs(self, top):
top.write('[ pairs ]\n')
top.write('; ai aj funct\n')
pairlist = sorted(self.current_molecule_type.pair_forces,
key=lambda x: (x.atom1, x.atom2))
for pair in pairlist:
p_type = self.lookup_gromacs_pairs[pair.__class__]
if p_type:
# Gromacs type is the first character
top.write('{0:6d} {1:7d} {2:4d}'.format(
pair.atom1, pair.atom2, int(p_type[0])))
pair_params = self.get_parameter_list_from_force(pair)
# Don't want to write over actual array.
param_units = list(self.unitvars[pair.__class__.__name__])
if p_type[0] == '2' and pair.scaleQQ:
# We have a scaleQQ as well, which has no units.
pair_params.insert(0, pair.scaleQQ)
param_units.insert(0, units.dimensionless)
for i, param in enumerate(pair_params):
top.write("{0:18.8e}".format(
param.value_in_unit(param_units[i])))
top.write('\n')
else:
logger.warning("Found unsupported pair type {0}".format(
pair.__class__.__name__))
top.write('\n')
def write_virtual_sites(self, top):
virtuals = defaultdict(list)
for force in self.current_molecule_type.virtual_forces:
if hasattr(force, 'atom5'):
virtuals[4].append(force)
elif hasattr(force, 'atom4'):
virtuals[3].append(force)
else:
virtuals[2].append(force)
virtuals[2] = sorted(virtuals[2], key=lambda x: (x.atom1, x.atom2, x.atom3))
virtuals[3] = sorted(virtuals[3], key=lambda x: (x.atom1, x.atom2, x.atom3, x.atom4))
virtuals[4] = sorted(virtuals[4], key=lambda x: (x.atom1, x.atom2, x.atom3, x.atom4, x.atom5))
for n_body_type, vsites in virtuals.items():
top.write('[ virtual_sites{0} ]\n'.format(n_body_type))
top.write(';from atoms({0}) func params\n'.format(n_body_type))
for vsite in vsites:
for n in range(1, n_body_type + 2):
atom = getattr(vsite, 'atom{}'.format(n))
top.write('{0:7d} '.format(atom))
top.write('{:4s}'.format(self.lookup_gromacs_virtuals[vsite.__class__][-1]))
vsite_params = self.get_parameter_list_from_force(vsite)
param_units = self.unitvars[vsite.__class__.__name__]
for param, unit in zip(vsite_params, param_units):
top.write('{0:18.8e}'.format(param.value_in_unit(unit)))
top.write('\n')
top.write('\n')
def write_bonds(self, top):
top.write('[ bonds ]\n')
top.write('; ai aj funct r k\n')
bondlist = sorted(self.current_molecule_type.bond_forces,
key=lambda x: (x.atom1, x.atom2))
for bond in bondlist:
bond_params = self.get_parameter_list_from_force(bond)
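# direction='from' converts the stored canonical parameters back into a Gromacs functional form.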
b_type, bond_params = self.canonical_bond(bond_params, bond, direction='from')
top.write('{0:7d} {1:7d} {2:4s}'.format(
bond.atom1, bond.atom2, b_type))
param_units = self.unitvars[bond.__class__.__name__]
for param, param_unit in zip(bond_params, param_units):
top.write('{0:18.8e}'.format(param.value_in_unit(param_unit)))
top.write('\n')
top.write('\n')
def write_angles(self, top):
top.write('[ angles ]\n')
top.write('; ai aj ak funct theta cth\n')
anglelist = sorted(self.current_molecule_type.angle_forces,
key=lambda x: (x.atom1, x.atom2, x.atom3))
for angle in anglelist:
angle_params = self.get_parameter_list_from_force(angle)
a_type, angle_params = self.canonical_angle(angle_params, angle, direction='from')
top.write('{0:7d} {1:7d} {2:7d} {3:4s}'.format(
angle.atom1, angle.atom2, angle.atom3, a_type))
param_units = self.unitvars[angle.__class__.__name__]
for param, param_unit in zip(angle_params, param_units):
top.write('{0:18.8e}'.format(param.value_in_unit(param_unit)))
top.write('\n')
top.write('\n')
def write_dihedrals(self, top):
top.write('[ dihedrals ]\n')
top.write('; i j k l func\n')
dihedrallist = sorted(self.current_molecule_type.dihedral_forces,
key=lambda x: (x.atom1, x.atom2, x.atom3, x.atom4))
for dihedral in dihedrallist:
atoms = dihedral.atom1, dihedral.atom2, dihedral.atom3, dihedral.atom4
top.write("{0:7d} {1:7d} {2:7d} {3:7d}".format(
atoms[0], atoms[1], atoms[2], atoms[3]))
kwds = self.get_parameter_kwds_from_force(dihedral)
d_type, paramlist = self.canonical_dihedral(kwds, dihedral, direction='from')
converted_dihedral = self.gromacs_dihedrals[d_type](*atoms, **paramlist[0])
top.write("{0:6d}".format(int(d_type)))
paramlist = self.get_parameter_list_from_force(converted_dihedral)
param_units = self.unitvars[converted_dihedral.__class__.__name__]
for param, param_unit in zip(paramlist, param_units):
top.write('{0:18.8e}'.format(param.value_in_unit(param_unit)))
top.write('\n')
top.write('\n')
def write_rigidwaters(self, top):
for rigidwater in self.current_molecule_type.rigidwaters:
top.write('[ settles ]\n')
top.write('; i funct dOH dHH\n')
s_type = 1
# Gromacs only uses the first atom of the rigid water as the O and expects the others to follow in sequence.
top.write('{0:6d} {1:6d} {2:18.8f} {3:18.8f}\n'.format(
rigidwater.atom1,
s_type,
rigidwater.dOH.value_in_unit(units.nanometers),
rigidwater.dHH.value_in_unit(units.nanometers)))
top.write('\n')
def write_exclusions(self, top):
top.write('[ exclusions ]\n')
exclusionlist = sorted(self.current_molecule_type.exclusions,
key=lambda x: (x[0], x[1]))
for exclusion in exclusionlist:
top.write('{0:7d} {1:7d}\n'.format(exclusion[0], exclusion[1]))
top.write('\n')
# =========== System creation =========== #
def create_moleculetype(self, top_moltype, mol_name, mol_count):
# Check if the moleculetype already exists
if self.system.molecule_types.get(mol_name):
self.current_molecule_type = self.system.molecule_types[mol_name]
else:
# Create an intermol moleculetype.
moltype = MoleculeType(mol_name)
moltype.nrexcl = top_moltype.nrexcl
self.system.add_molecule_type(moltype)
self.current_molecule_type = moltype
# Create all the intermol molecules of the current type.
for n_mol in range(mol_count):
self.create_molecule(top_moltype, mol_name)
for pair in top_moltype.pairs:
self.create_pair(pair)
for bond in top_moltype.bonds:
self.create_bond(bond)
for angle in top_moltype.angles:
self.create_angle(angle)
for dihedral in top_moltype.dihedrals:
self.create_dihedral(dihedral)
for rigidwater in top_moltype.rigidwaters:
self.create_rigidwater(rigidwater)
for exclusion in top_moltype.exclusions:
self.create_exclusion(exclusion)
for vsite_type, vsites in top_moltype.virtuals.items():
for vsite in vsites:
self.create_virtual_site(vsite, vsite_type)
def create_molecule(self, top_moltype, mol_name):
molecule = Molecule(mol_name)
self.system.add_molecule(molecule)
self.current_molecule = molecule
for atom in top_moltype.atoms:
self.create_atom(atom)
def create_atom(self, temp_atom):
index = self.n_atoms_added + 1
atomtype = temp_atom[1]
#res_id = int(temp_atom[2])
res_id = self.gro.residue_ids[self.n_atoms_added]
#res_name = temp_atom[3]
res_name = self.gro.residue_names[self.n_atoms_added]
atom_name = temp_atom[4]
cgnr = int(temp_atom[5])
charge = float(temp_atom[6]) * units.elementary_charge
if len(temp_atom) in [8, 11]:
mass = float(temp_atom[7]) * units.amu
else:
mass = -1 * units.amu
atom = Atom(index, atom_name, res_id, res_name)
atom.cgnr = cgnr
atom.atomtype = (0, atomtype)
atom.charge = (0, charge)
atom.mass = (0, mass)
if len(temp_atom) == 11:
atomtype = temp_atom[8]
charge = float(temp_atom[9]) * units.elementary_charge
mass = float(temp_atom[10]) * units.amu
atom.atomtype = (1, atomtype)
atom.charge = (1, charge)
atom.mass = (1, mass)
atom.position = self.gro.positions[self.n_atoms_added]
atom.velocity = self.gro.velocities[self.n_atoms_added]
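# Fill in remaining per-state attributes (atomic number, bonding type, default mass, LJ parameters) from the matching AtomType, if one exists.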
for state, atomtype in atom.atomtype.items():
intermol_atomtype = self.system.atomtypes.get(atomtype)
if not intermol_atomtype:
logger.warning('A corresponding AtomType for {0} was not'
' found.'.format(atom))
continue
atom.atomic_number = intermol_atomtype.atomic_number
if not atom.bondingtype:
if intermol_atomtype.bondtype:
atom.bondingtype = intermol_atomtype.bondtype
else:
atom.bondingtype = atomtype
if atom.mass.get(state)._value < 0:
if intermol_atomtype.mass._value >= 0:
atom.mass = (state, intermol_atomtype.mass)
else:
logger.warning("Suspicious mass parameter found for atom "
"{0}. Visually inspect before using.".format(atom))
atom.sigma = (state, intermol_atomtype.sigma)
atom.epsilon = (state, intermol_atomtype.epsilon)
self.current_molecule.add_atom(atom)
self.n_atoms_added += 1
def create_bond(self, bond):
n_atoms = 2
numeric_bondtype = bond[n_atoms]
atoms = [int(n) for n in bond[:n_atoms]]
btypes = tuple([self.lookup_atom_bondingtype(int(x))
for x in bond[:n_atoms]])
# Get forcefield parameters.
if len(bond) == n_atoms + 1:
bond_type = self.find_forcetype(btypes, self.bondtypes)
else:
bond[0] = btypes[0]
bond[1] = btypes[1]
bond = " ".join(bond)
bond_type = self.process_forcetype(btypes, 'bond', bond, n_atoms,
self.gromacs_bond_types, self.canonical_bond)
bond = bond.split()
# Create the actual force.
if numeric_bondtype in self.gromacs_bonds:
gromacs_bond = self.gromacs_bonds[numeric_bondtype]
# Connection bonds don't have bondtypes.
if gromacs_bond == ConnectionBond:
kwds = dict()
else:
kwds = self.choose_parameter_kwds_from_forces(
bond, n_atoms, bond_type, gromacs_bond)
# Give it canonical form parameters.
canonical_bond, kwds = self.canonical_bond(kwds, gromacs_bond,
direction='into')
new_bond = canonical_bond(*atoms, **kwds)
else:
logger.warning("Unsupported Gromacs bondtype: {0}".format(numeric_bondtype))
new_bond = None
if not new_bond:
logger.warning("Undefined bond formatting.")
else:
self.current_molecule_type.bond_forces.add(new_bond)
def create_pair(self, pair):
"""Create a pair force object based on a [ pairs ] entry"""
n_entries = len(pair)
numeric_pairtype = pair[2]
atoms = [int(pair[0]), int(pair[1])]
atomtypes = tuple([self.lookup_atom_atomtype(int(pair[0])),
self.lookup_atom_atomtype(int(pair[1]))])
if n_entries == 3:
pairtype = self.find_forcetype(atomtypes, self.pairtypes)
else:
atomtypes = [None, None]
pairvars = [atoms[0], atoms[1], atomtypes[0], atomtypes[1]]
optpairvars = dict()
if numeric_pairtype == '1':
if self.system.combination_rule == "Multiply-C6C12":
thispair = LjCPair
elif self.system.combination_rule in ['Multiply-Sigeps', 'Lorentz-Berthelot']:
thispair = LjSigepsPair
thispairtype = thispair.__base__ # what's the base class
u = self.unitvars[thispairtype.__name__]
if n_entries > 3:
pairvars.extend([float(pair[3]) * u[0], float(pair[4]) * u[1]])
elif n_entries == 3:
if not pairtype:
# assume the values will be created by system defaults
thispair = LjDefaultPair
else:
assert isinstance(pairtype, thispairtype)
pairvars.extend(self.get_parameter_list_from_force(pairtype))
new_pair = thispair(*pairvars)
elif numeric_pairtype == '2':
if self.system.combination_rule == "Multiply-C6C12":
thispair = LjqCPair
elif self.system.combination_rule in ['Multiply-Sigeps', 'Lorentz-Berthelot']:
thispair = LjqSigepsPair
thispairtype = thispair.__base__ # what's the parent?
u = self.unitvars[thispairtype.__name__]
if n_entries > 3:
pairvars.extend([float(pair[4]) * u[0], float(pair[5]) * u[1],
float(pair[6]) * u[2], float(pair[7]) * u[3]])
# Generate a default filled dictionary, then fill in the pair.
optpairvars = ff.optforceparams('pair')
optpairvars['scaleQQ'] = float(pair[3]) * units.dimensionless
elif n_entries == 3:
if not pairtype:
# Assume the values will be created by system defaults.
thispair = LjqDefaultPair
else:
assert isinstance(pairtype, thispairtype)
# Bring the data from this pairtype.
optpairvars['scaleQQ'] = pairtype.scaleQQ
pairvars.extend(self.get_parameter_list_from_force(pairtype))
new_pair = thispair(*pairvars, **optpairvars)
else:
logger.warning("Unsupported Gromacs pairtype: {0}".format(
numeric_pairtype))
new_pair = None
if not new_pair:
logger.warning("Undefined pair formatting.")
else:
self.current_molecule_type.pair_forces.add(new_pair)
def create_rigidwater(self, rigidwater):
# gromacs just assumes the first atom is the oxygen.
atom1 = int(rigidwater[0])
atom2 = atom1 + 1
atom3 = atom1 + 2
new_rigidwater = RigidWater(atom1, atom2, atom3,
float(rigidwater[2]) * units.nanometers,
float(rigidwater[3]) * units.nanometers)
self.current_molecule_type.rigidwaters.add(new_rigidwater)
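# Also represent the rigid geometry as stiff harmonic bonds and an angle (fixed reference force constants below), so the constraints are available as bonded terms too.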
waterbondrefk = 900*units.kilojoules_per_mole * units.nanometers**(-2)
wateranglerefk = 400*units.kilojoules_per_mole * units.degrees**(-2)
angle = 2.0 * math.asin(0.5 * float(rigidwater[3]) / float(rigidwater[2])) * units.radians
dOH = float(rigidwater[2]) * units.nanometers
new_bond = HarmonicBond(atom1, atom2, None, None, dOH, waterbondrefk, c=True)
self.current_molecule_type.bond_forces.add(new_bond)
new_bond = HarmonicBond(atom1, atom3, None, None, dOH, waterbondrefk, c=True)
self.current_molecule_type.bond_forces.add(new_bond)
new_angle = HarmonicAngle(atom2, atom1, atom3, None, None, None, angle, wateranglerefk, c=True)
self.current_molecule_type.angle_forces.add(new_angle)
def create_exclusion(self, exclusion):
first = exclusion[0]
for index in exclusion:
if first < index:
self.current_molecule_type.exclusions.add((int(first), int(index)))
def create_virtual_site(self, vsite, n_body_type):
n_entries = len(vsite)
n_atoms = int(n_body_type) + 1
numeric_vsite_type = vsite[n_atoms]
force_lookup_key = '{}-{}'.format(n_body_type, numeric_vsite_type)
VSite = self.gromacs_virtuals[force_lookup_key]
VSiteType = VSite.__base__
atoms = [int(n) for n in vsite[:n_atoms]]
btypes = [None] * n_atoms # TODO: Unused? Can we remove from classes?
params = self.create_kwds_from_entries(vsite, VSiteType, offset=n_atoms + 1)
# Can't unpack multiple args in Python <3.5 so combine into one.
atoms.extend(btypes)
new_vsite = VSite(*atoms, **params)
self.current_molecule_type.virtual_forces.add(new_vsite)
def create_angle(self, angle):
n_atoms = 3
atoms = [int(n) for n in angle[:n_atoms]]
btypes = tuple([self.lookup_atom_bondingtype(int(x))
for x in angle[:n_atoms]])
numeric_angletype = angle[n_atoms]
# Get forcefield parameters.
if len(angle) == n_atoms + 1:
angle_type = self.find_forcetype(btypes, self.angletypes)
else:
angle[0] = btypes[0]
angle[1] = btypes[1]
angle[2] = btypes[2]
angle = " ".join(angle)
angle_type = self.process_forcetype(btypes, 'angle', angle, n_atoms,
self.gromacs_angle_types, self.canonical_angle)
angle = angle.split()
# Create the actual force.
if numeric_angletype in self.gromacs_angles:
gromacs_angle = self.gromacs_angles[numeric_angletype]
kwds = self.choose_parameter_kwds_from_forces(
angle, n_atoms, angle_type, gromacs_angle)
# Give it canonical form parameters.
canonical_angle, kwds = self.canonical_angle(kwds, gromacs_angle,
direction='into')
new_angle = canonical_angle(*atoms, **kwds)
else:
logger.warning("Unsupported Gromacs angletype: {0}".format(numeric_angletype))
new_angle = None
if not new_angle:
logger.warning("Undefined angle formatting.")
else:
self.current_molecule_type.angle_forces.add(new_angle)
def create_dihedral(self, dihedral):
"""Create a dihedral object based on a [ dihedrals ] entry. """
n_entries = len(dihedral)
n_atoms = 4
atoms = [int(i) for i in dihedral[0:n_atoms]]
numeric_dihedraltype = dihedral[n_atoms]
improper = numeric_dihedraltype in ['2', '4']
dihedral_types = [None]
if n_entries == n_atoms + 1:
btypes = [self.lookup_atom_bondingtype(int(x))
for x in dihedral[:n_atoms]]
# Use the returned btypes that we get a match with!
dihedral_types = self.find_dihedraltype(btypes, improper=improper)
# This dihedraltype has been found before and already converted.
if numeric_dihedraltype in ['1', '3', '4', '5', '9']:
gromacs_dihedral = TrigDihedral
else:
gromacs_dihedral = self.gromacs_dihedrals[numeric_dihedraltype]
elif n_entries == n_atoms + 2:
# This case handles special dihedral given via a #define.
if self.defines.get(dihedral[-1]):
params = self.defines[dihedral[-1]].split()
dihedral = dihedral[:-1] + params
gromacs_dihedral = self.gromacs_dihedrals[numeric_dihedraltype]
else:
# Some gromacs parameters don't include sufficient entries for all
# types, so add some zeros. A bit of a kludge...
dihedral += ['0.0'] * 3
gromacs_dihedral = self.gromacs_dihedrals[numeric_dihedraltype]
for d_type in dihedral_types:
kwds = self.choose_parameter_kwds_from_forces(
dihedral, n_atoms, d_type, gromacs_dihedral)
canonical_dihedral, kwds = self.canonical_dihedral(
kwds, gromacs_dihedral, direction="into")
kwds['improper'] = improper
new_dihedral = canonical_dihedral(*atoms, **kwds)
self.current_molecule_type.dihedral_forces.add(new_dihedral)
def find_dihedraltype(self, bondingtypes, improper):
"""Determine the type of dihedral interaction between four atoms. """
a1, a2, a3, a4 = bondingtypes
# All possible ways to match a dihedraltype
atom_orders = [[a1, a2, a3, a4], # original order
[a4, a3, a2, a1], # flip it
[a1, a2, a3, 'X'], # single wildcard 1
['X', a3, a2, a1], # flipped single wildcard 1
[a4, a3, a2, 'X'], # flipped single wildcard 2
['X', a2, a3, a4], # single wildcard 2
['X', a2, a3, 'X'], # double wildcard
['X', 'X', a3, a4], # front end double wildcard
[a1, a2, 'X', 'X'], # rear end double wildcard
['X', 'X', a2, a1], # rear end double wildcard
[a1, 'X', 'X', a4], # middle double wildcard
['X', a3, a2, 'X'], # flipped double wildcard
[a4, a3, 'X', 'X'], # flipped front end double wildcard
[a4, 'X', 'X', a1], # flipped middle double wildcard
]
dihedral_types = set()
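# Collect matches whose parameters are pairwise unique; stop at the first atom ordering that yields any dihedral types.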
for i, atoms in enumerate(atom_orders):
a1, a2, a3, a4 = atoms
key = tuple([a1, a2, a3, a4, improper])
dihedral_type = self.dihedraltypes.get(key)
if dihedral_type:
for to_be_added in dihedral_type:
for already_added in dihedral_types:
if not self.type_parameters_are_unique(to_be_added,
already_added):
break
else: # The loop completed without breaking.
dihedral_types.add(to_be_added)
break
if not dihedral_types:
logger.warning("Lookup failed for dihedral: {0}".format(bondingtypes))
return []
else:
return list(dihedral_types)
@staticmethod
def type_parameters_are_unique(a, b):
"""Check if two force types are unique.
Currently only tests TrigDihedralType and ImproperHarmonicDihedralType
because these are the only two forcetypes that we currently allow
to have multiple values for the same set of 4 atom bondingtypes.
"""
if (isinstance(a, TrigDihedralType) and
isinstance(b, TrigDihedralType)):
return not (a.fc0 == b.fc0 and
a.fc1 == b.fc1 and
a.fc2 == b.fc2 and
a.fc3 == b.fc3 and
a.fc4 == b.fc4 and
a.fc5 == b.fc5 and
a.fc6 == b.fc6 and
a.improper == b.improper and
a.phi == b.phi)
elif (isinstance(a, ImproperHarmonicDihedralType) and
isinstance(b, ImproperHarmonicDihedralType)):
return not (a.xi == b.xi and
a.k == b.k and
a.improper == b.improper)
else:
return True
def lookup_atom_bondingtype(self, index):
return self.current_molecule.atoms[index - 1].bondingtype
def lookup_atom_atomtype(self, index, state=0):
return self.current_molecule.atoms[index - 1].atomtype[state]
def find_forcetype(self, bondingtypes, types_of_kind):
forcetype = types_of_kind.get(bondingtypes)
if not forcetype:
forcetype = types_of_kind.get(bondingtypes[::-1])
if not forcetype:
logger.debug("Lookup failed for atom bonding types'{0}' in {1}".format(
bondingtypes, types_of_kind.keys()))
return forcetype
# =========== Pre-processing and forcetype creation =========== #
def process_file(self, top_filename):
append = ''
with open(top_filename) as top_file:
for line in top_file:
if line.strip().endswith('\\'):
append = '{0} {1}'.format(append, line[:line.rfind('\\')])
else:
self.process_line(top_filename, '{0} {1}'.format(append, line))
append = ''
def process_line(self, top_filename, line):
"""Process one line from a file."""
if ';' in line:
line = line[:line.index(';')]
stripped = line.strip()
ignore = not all(self.if_stack)
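# ignore is True while we are inside any #ifdef/#ifndef block whose condition evaluated False.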
if stripped.startswith('*') or len(stripped) == 0:
# A comment or empty line.
return
elif stripped.startswith('[') and not ignore:
# The start of a category.
if not stripped.endswith(']'):
raise GromacsError('Illegal line in .top file: '+line)
self.current_directive = stripped[1:-1].strip()
logger.debug("Parsing {0}...".format(self.current_directive))
elif stripped.startswith('#'):
# A preprocessor command.
fields = stripped.split()
command = fields[0]
if len(self.if_stack) != len(self.else_stack):
raise GromacsError('#if/#else stack out of sync')
if command == '#include' and not ignore:
# Locate the file to include
name = stripped[len(command):].strip(' \t"<>')
search_dirs = self.include_dirs+(os.path.dirname(top_filename),)
for sub_dir in search_dirs:
top_filename = os.path.join(sub_dir, name)
if os.path.isfile(top_filename):
# We found the file, so process it.
self.process_file(top_filename)
break
else:
raise GromacsError('Could not locate #include file: {}\n\n'
'Did you add the GROMACS share directory'
' to "GMXDATA"?'.format(name))
elif command == '#define' and not ignore:
# Add a value to our list of defines.
if len(fields) < 2:
raise GromacsError('Illegal line in .top file: '+line)
name = fields[1]
value_start = stripped.find(name, len(command))+len(name)+1
value = line[value_start:].strip()
self.defines[name] = value
elif command == '#ifdef':
# See whether this block should be ignored.
if len(fields) < 2:
raise GromacsError('Illegal line in .top file: '+line)
name = fields[1]
self.if_stack.append(name in self.defines)
self.else_stack.append(False)
elif command == '#ifndef':
# See whether this block should be ignored.
if len(fields) < 2:
raise GromacsError('Illegal line in .top file: '+line)
name = fields[1]
self.if_stack.append(name not in self.defines)
self.else_stack.append(False)
elif command == '#endif':
# Pop an entry off the if stack
if len(self.if_stack) == 0:
raise GromacsError('Unexpected line in .top file: '+line)
del(self.if_stack[-1])
del(self.else_stack[-1])
elif command == '#else':
# Reverse the last entry on the if stack
if len(self.if_stack) == 0:
raise GromacsError('Unexpected line in .top file: '+line)
if self.else_stack[-1]:
raise GromacsError('Unexpected line in .top file: #else has'
' already been used ' + line)
self.if_stack[-1] = (not self.if_stack[-1])
self.else_stack[-1] = True
elif not ignore:
# A line of data for the current category
if self.current_directive is None:
raise GromacsError('Unexpected line in .top file: "{0}"'.format(line))
if self.current_directive == 'defaults':
self.process_defaults(line)
elif self.current_directive == 'moleculetype':
self.process_moleculetype(line)
elif self.current_directive == 'molecules':
self.process_molecule(line)
elif self.current_directive == 'atoms':
self.process_atom(line)
elif self.current_directive == 'bonds':
self.process_bond(line)
elif self.current_directive == 'angles':
self.process_angle(line)
elif self.current_directive == 'dihedrals':
self.process_dihedral(line)
elif self.current_directive == 'settles':
self.process_settle(line)
elif self.current_directive == 'exclusions':
self.process_exclusion(line)
elif self.current_directive == 'pairs':
self.process_pair(line)
elif self.current_directive == 'cmap':
self.process_cmap(line)
elif self.current_directive == 'atomtypes':
self.process_atomtype(line)
elif self.current_directive == 'bondtypes':
self.process_bondtype(line)
elif self.current_directive == 'angletypes':
self.process_angletype(line)
elif self.current_directive == 'dihedraltypes':
self.process_dihedraltype(line)
elif self.current_directive == 'implicit_genborn_params':
self.process_implicittype(line)
elif self.current_directive == 'pairtypes':  # and not self.system.genpairs:
self.process_pairtype(line)
elif self.current_directive == 'cmaptypes':
self.process_cmaptype(line)
elif self.current_directive == 'nonbond_params':
self.process_nonbond_params(line)
elif self.current_directive.startswith('virtual_sites'):
vsite_type = self.current_directive[-1]
self.process_virtual_sites(line, vsite_type)
def process_defaults(self, line):
"""Process the [ defaults ] line."""
fields = line.split()
if len(fields) < 5:
self.too_few_fields(line)
self.system.nonbonded_function = int(fields[0])
self.system.combination_rule = self.gromacs_combination_rules[fields[1]]
self.system.genpairs = fields[2]
self.system.lj_correction = float(fields[3])
self.system.coulomb_correction = float(fields[4])
def process_moleculetype(self, line):
"""Process a line in the [ moleculetypes ] category."""
fields = line.split()
if len(fields) < 2:
self.too_few_fields(line)
mol_type = self.TopMoleculeType()
mol_type.nrexcl = int(fields[1])
self.molecule_types[fields[0]] = mol_type
self.current_molecule_type = mol_type
def process_molecule(self, line):
"""Process a line in the [ molecules ] category."""
fields = line.split()
if len(fields) < 2:
self.too_few_fields(line)
self.molecules.append((fields[0], int(fields[1])))
def process_atom(self, line):
"""Process a line in the [ atoms ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype()
fields = line.split()
if len(fields) < 5:
self.too_few_fields(line)
if len(fields) not in [7, 8, 11]:
self.invalid_line(line)
self.current_molecule_type.atoms.append(fields)
def process_bond(self, line):
"""Process a line in the [ bonds ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype()
fields = line.split()
if len(fields) < 3:
self.too_few_fields(line)
self.current_molecule_type.bonds.append(fields)
def process_angle(self, line):
"""Process a line in the [ angles ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype()
fields = line.split()
if len(fields) < 4:
self.too_few_fields(line)
self.current_molecule_type.angles.append(fields)
def process_dihedral(self, line):
"""Process a line in the [ dihedrals ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype()
fields = line.split()
if len(fields) < 5:
self.too_few_fields(line)
self.current_molecule_type.dihedrals.append(fields)
def process_settle(self, line):
"""Process a line in the [ settles ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype()
fields = line.split()
if len(fields) < 4:
self.too_few_fields(line)
self.current_molecule_type.rigidwaters.append(fields)
def process_exclusion(self, line):
"""Process a line in the [ exclusions ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype()
fields = line.split()
if len(fields) < 2:
self.too_few_fields(line)
self.current_molecule_type.exclusions.append(fields)
def process_pair(self, line):
"""Process a line in the [ pairs ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype()
fields = line.split()
if len(fields) < 3:
self.too_few_fields(line)
self.current_molecule_type.pairs.append(fields)
def process_cmap(self, line):
"""Process a line in the [ cmaps ] category."""
if self.current_molecule_type is None:
self.directive_before_moleculetype('cmap')
fields = line.split()
if len(fields) < 6:
self.too_few_fields(line)
self.current_molecule_type.cmaps.append(fields)
def process_atomtype(self, line):
"""Process a line in the [ atomtypes ] category."""
fields = line.split()
if len(fields) < 6:
self.too_few_fields(line)
if len(fields[3]) == 1:
# Bonded type and atomic number are both missing.
fields.insert(1, None)
fields.insert(1, None)
elif len(fields[4]) == 1 and len(fields[5]) >= 1:
if fields[1][0].isalpha():
# Atomic number is missing.
fields.insert(2, None)
else:
# Bonded type is missing.
fields.insert(1, None)
atomtype = fields[0]
if fields[1] is None:
bondingtype = atomtype
else:
bondingtype = fields[1]
if fields[2]:
atomic_number = int(fields[2])
else:
atomic_number = -1
mass = float(fields[3]) * units.amu
charge = float(fields[4]) * units.elementary_charge
ptype = fields[5]
# Add correct units to the LJ parameters.
if self.system.combination_rule == "Multiply-C6C12":
lj_param1 = (float(fields[6]) *
units.kilojoules_per_mole * units.nanometers**(6))
lj_param2 = (float(fields[7]) *
units.kilojoules_per_mole * units.nanometers**(12))
AtomtypeClass = AtomCType
elif self.system.combination_rule in ['Multiply-Sigeps', 'Lorentz-Berthelot']:
lj_param1 = float(fields[6]) * units.nanometers # sigma
lj_param2 = float(fields[7]) * units.kilojoules_per_mole # epsilon
AtomtypeClass = AtomSigepsType
else:
raise InterMolError("Unknown combination rule: {0}".format(self.system.combination_rule))
new_atom_type = AtomtypeClass(atomtype, bondingtype, atomic_number,
mass, charge, ptype, lj_param1, lj_param2)
self.system.add_atomtype(new_atom_type)
def process_bondtype(self, line):
"""Process a line in the [ bondtypes ] category."""
fields = line.split()
if len(fields) < 5:
self.too_few_fields(line)
btypes = fields[:2]
bond_type = self.process_forcetype(btypes, 'bond', line, 2,
self.gromacs_bond_types, self.canonical_bond)
self.bondtypes[tuple(fields[:2])] = bond_type
def process_angletype(self, line):
"""Process a line in the [ angletypes ] category."""
fields = line.split()
if len(fields) < 6:
self.too_few_fields(line)
btypes = fields[:3]
angle_type = self.process_forcetype(btypes, 'angle', line, 3,
self.gromacs_angle_types, self.canonical_angle)
self.angletypes[tuple(fields[:3])] = angle_type
def process_dihedraltype(self, line):
"""Process a line in the [ dihedraltypes ] category."""
fields = line.split()
if len(fields) < 5:
self.too_few_fields(line)
# Some gromacs parameters don't include sufficient numbers of types.
# Add some zeros (bit of a kludge).
line += ' 0.0 0.0 0.0'
fields = line.split()
# Check whether they are using 2 or 4 atom types
if fields[2].isdigit():
btypes = ['X', fields[0], fields[1], 'X']
n_atoms_specified = 2
elif fields[4].isdigit() and not fields[3].isdigit(): # assumes gromacs types are not all digits.
btypes = fields[:4]
n_atoms_specified = 4
else:
# TODO: Come up with remaining cases (are there any?) and a proper
# failure case.
logger.warning('Unable to determine the number of atom types in dihedraltype line: {0}'.format(line))
return
dihedral_type = self.process_forcetype(
btypes, 'dihedral', line, n_atoms_specified,
self.gromacs_dihedral_types, self.canonical_dihedral)
# Still need a bit more information
numeric_dihedraltype = fields[n_atoms_specified]
dihedral_type.improper = numeric_dihedraltype in ['2', '4']
key = tuple([btypes[0], btypes[1], btypes[2], btypes[3],
dihedral_type.improper])
if key in self.dihedraltypes:
# There are multiple dihedrals defined for these atom types.
self.dihedraltypes[key].add(dihedral_type)
else:
self.dihedraltypes[key] = {dihedral_type}
def process_forcetype(self, bondingtypes, forcename, line, n_atoms,
gromacs_force_types, canonical_force):
""" """
fields = line.split()
numeric_forcetype = fields[n_atoms]
gromacs_force_type = gromacs_force_types[numeric_forcetype]
kwds = self.create_kwds_from_entries(fields, gromacs_force_type, offset=n_atoms+1)
CanonicalForceType, kwds = canonical_force(
kwds, gromacs_force_type, direction='into')
force_type = CanonicalForceType(*bondingtypes, **kwds)
if not force_type:
logger.warning("{0} is not a supported {1} type".format(fields[2], forcename))
return
else:
return force_type
def process_implicittype(self, line):
"""Process a line in the [ implicit_genborn_params ] category."""
fields = line.split()
if len(fields) < 6:
self.too_few_fields(line)
self.implicittypes[fields[0]] = fields
def process_pairtype(self, line):
"""Process a line in the [ pairtypes ] category."""
fields = line.split()
if len(fields) < 5:
self.too_few_fields(line)
pair_type = None
PairFunc = None
combination_rule = self.system.combination_rule
kwds = dict()
numeric_pairtype = fields[2]
if numeric_pairtype == '1':
# LJ/Coul. 1-4 (Type 1)
if len(fields) == 5:
if combination_rule == "Multiply-C6C12":
PairFunc = LjCPairType
elif combination_rule in ['Multiply-Sigeps', 'Lorentz-Berthelot']:
PairFunc = LjSigepsPairType
offset = 3
elif numeric_pairtype == '2':
if combination_rule == "Multiply-C6C12":
PairFunc = LjqCPairType
elif combination_rule in ['Multiply-Sigeps', 'Lorentz-Berthelot']:
PairFunc = LjqSigepsPairType
offset = 4
else:
logger.warning("Could not find pair type for line: {0}".format(line))
if PairFunc:
pairvars = [fields[0], fields[1]]
kwds = self.create_kwds_from_entries(fields, PairFunc, offset=offset)
# kludge because of placement of scaleQQ...
if numeric_pairtype == '2':
# try to get this out ...
kwds['scaleQQ'] = float(fields[3]) * units.dimensionless
pair_type = PairFunc(*pairvars, **kwds)
self.pairtypes[tuple(fields[:2])] = pair_type
def process_cmaptype(self, line):
"""Process a line in the [ cmaptypes ] category."""
fields = line.split()
if len(fields) < 8 or len(fields) < 8+int(fields[6])*int(fields[7]):
self.too_few_fields(line)
self.cmaptypes[tuple(fields[:5])] = fields
def process_nonbond_params(self, line):
"""Process a line in the [ nonbond_param ] category."""
fields = line.split()
natoms = 2
nonbonded_type = None
NonbondedFunc = None
combination_rule = self.system.combination_rule
if fields[2] == '1':
if combination_rule == 'Multiply-C6C12':
NonbondedFunc = LjCNonbondedType
elif combination_rule in ['Lorentz-Berthelot', 'Multiply-Sigeps']:
NonbondedFunc = LjSigepsNonbondedType
elif fields[2] == '2':
if combination_rule == 'Buckingham':
NonbondedFunc = BuckinghamNonbondedType
else:
logger.warning("Could not find nonbonded type for line: {0}".format(line))
return
nonbonded_vars = [fields[0], fields[1]]
kwds = self.create_kwds_from_entries(fields, NonbondedFunc, offset=3)
nonbonded_type = NonbondedFunc(*nonbonded_vars, **kwds)
# TODO: figure out what to do with the gromacs numeric type
nonbonded_type.type = int(fields[2])
self.system.nonbonded_types[tuple(nonbonded_vars)] = nonbonded_type
def process_virtual_sites(self, line, v_site_type):
"""Process a line in a [ virtual_sites? ] category."""
if v_site_type == 'n':
raise UnimplementedSetting('Parsing of [ virtual_sitesn ] directives'
' is not yet implemented')
fields = line.split()
self.current_molecule_type.virtuals[v_site_type].append(fields)
# =========== Pre-processing errors =========== #
def too_few_fields(self, line):
raise GromacsError('Too few fields in [ {0} ] line: {1}'.format(
self.current_directive, line))
def invalid_line(self, line):
raise GromacsError('Invalid format in [ {0} ] line: {1}'.format(
self.current_directive, line))
def directive_before_moleculetype(self):
raise GromacsError('Found [ {0} ] directive before [ moleculetype ]'.format(
self.current_directive))
|
ctk3b/InterMol
|
intermol/gromacs/gromacs_parser.py
|
Python
|
mit
| 67,409
|
[
"Gromacs",
"LAMMPS"
] |
5d709685629abb0c674585ebce30b8cc5498da6da8aaa3a8ea2d5a815d316fcc
|
# synapse_tutorial.py ---
#
# Filename: synapse_tutorial.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri Jan 17 09:43:51 2014 (+0530)
# Version:
# Last-Updated: Thu Oct 2 11:27:05 IST 2014
# By: Upi
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# This is a tutorial based on an example Upi suggested. The code is
# exported from an ipython notebook and the comments present the
# markdown version of the tutorial text.
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.# -*- coding: utf-8 -*-
#
#
# Code:
import moose
import random # We need this for random number generation
from numpy import random as nprand
def main():
"""
In this example we walk through creation of a vector of IntFire
elements and setting up synaptic connection between them. Synapse on
IntFire elements is an example of ElementField - elements that do not
exist on their own, but only as part of another element. This example
also illustrates various operations on `vec` objects and
ElementFields.
"""
size = 1024 # number of IntFire objects in a vec
delayMin = 0
delayMax = 4
Vmax = 1.0
thresh = 0.8
refractoryPeriod = 0.4
connectionProbability = 0.1
weightMax = 0.5
# The above sets the constants we shall use in this example. Now we create a vector of IntFire elements of size `size`.
net = moose.IntFire('/network', size)
# This creates a `vec` of `IntFire` elements of size 1024 and returns the first `element`, i.e. "/network[0]".
net = moose.element('/network[0]')
# You need now to provide synaptic input to the network
synh = moose.SimpleSynHandler( '/network/synh', size )
# These need to be connected to the nodes in the network
moose.connect( synh, 'activationOut', net, 'activation', 'OneToOne' )
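# 'OneToOne' pairs synh.vec[i] with net.vec[i], so each cell gets its own synapse handler.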
# You can access the underlying vector of elements using the `vec` field on any element. This is very useful for vectorized field access:
net.vec.Vm = [thresh / 2.0] * size
# The right-hand side of the assignment creates a Python list of length `size` with each element set to `thresh/2.0`, which is 0.4. You can index into the `vec` to access an individual element's field:
print((net.vec[1].Vm))
# `SimpleSynHandler` class has an `ElementField` called `synapse`. It is just like a `vec` above in terms of field access, but by default its size is 0.
print((len(synh.synapse)))
# To actually create synapses, you can explicitly assign the `num` field of this ElementField, or set the `numSynapses` field of the `SimpleSynHandler` element. Some functions can also implicitly set the size of the `ElementField`.
synh.numSynapses = 3
print((len(synh.synapse)))
synh.synapse.num = 4
print((len(synh.synapse)))
# Now you can index into `synh.synapse` as if it were an array.
print(('Before:', synh.synapse[0].delay))
synh.synapse[0].delay = 1.0
print(('After:', synh.synapse[0].delay))
# You could do the same vectorized assignment as with `vec` directly:
synh.synapse.weight = [0.2] * len(synh.synapse)
print((synh.synapse.weight))
# You can create the synapses and assign the weights and delays using loops:
for syn in synh.vec:
syn.synapse.num = random.randint(1,10) # create synapse fields with random size between 1 and 10, end points included
# Below is one (inefficient) way of setting the individual weights of the elements in 'synapse'
for ii in range(len(syn.synapse)):
syn.synapse[ii].weight = random.random() * weightMax
# This is a more efficient way - the RHS of `=` is a Python list comprehension, which is rather fast
syn.synapse.delay = [delayMin + random.random() * delayMax for ii in range(len(syn.synapse))]
# An even faster way is to use numpy.random.rand(size), which produces an array of random numbers uniformly distributed between 0 and 1
syn.synapse.delay = delayMin + nprand.rand(len(syn.synapse)) * delayMax
# Now display the results; we use slice notation on `vec` to show the values of delay and weight for the first 5 elements in `/network`
for syn in synh.vec[:5]:
print(('Delays for synapses on ', syn.path, ':', syn.synapse.delay))
print(('Weights for synapses on ', syn.path, ':', syn.synapse.weight))
if __name__ == '__main__':
main()
#
# synapse_tutorial.py ends here
|
BhallaLab/moose-examples
|
snippets/synapse_tutorial.py
|
Python
|
gpl-2.0
| 5,219
|
[
"MOOSE"
] |
b49f7b165556a40376f1c10bc50296339b3dc2172dad7c0d20cc613b23374436
|
# coding: utf-8
# In[59]:
import numpy
import matplotlib # TODO bug in ipython? (sometimes can't call the module directly)
from matplotlib import pyplot
import scipy
from scipy import stats
from scipy import optimize
from scipy import misc  # scipy.misc.factorial is used by poissoniana below
# %matplotlib tk
# %matplotlib notebook #?
get_ipython().magic(u'matplotlib inline')
import math
def poissoniana(x, mu, area):
return area * (numpy.exp(-mu) * mu**x)/ (scipy.misc.factorial(x)) # if x >= 0
def esponenziale(x, alpha, mu, nu):
return numpy.exp(-alpha*(x-mu))+nu
# define unnormalized gaussian
# def generalGaussian(x, mu, sigma, area):
# return area * scipy.stats.norm.pdf(x, mu, sigma)
# conversion between x ticks and bin delimiters and viceversa
def binDelimitersFromX(x):
binSize = x[1] - x[0]
binDelimiters = numpy.append(x, x[-1]+binSize) - 0.5*binSize
return binDelimiters
def xFromBinDelimiters(binDelimiters):
binSize = binDelimiters[1] - binDelimiters[0]
x = numpy.delete(binDelimiters, -1) + 0.5*binSize
return x
# define a function to fit histogram's results
def fitHistogram(f, histogramResults, dictAdditionalArguments={}):
"""
f
function to fit the histogram
dictAdditionalArguments
dictionary of additional arguments to pass to scipy.optimize.curve_fit(...)
example: p0=startingValuesList --> {"p0": startingValues}
histogramResults
the result of matplotlib.pyplot.hist(...)
or results of numpy.histogram(...)
"""
# pay attention shift of half binSize
binDelimiters = histogramResults[1]
x = xFromBinDelimiters(binDelimiters)
y = histogramResults[0]
values, covariance = scipy.optimize.curve_fit(f, x, y, **dictAdditionalArguments)
# unpack: (TODO just like pointers?)
# *list
# *touple
# **dictionary
return values, covariance
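# A minimal usage sketch (hypothetical `data` array; priors as defined further below):
#   counts, edges = numpy.histogram(data, bins=50)
#   values, covariance = fitHistogram(esponenziale, (counts, edges), {"p0": [alphaPrior, muPrior, nuPrior]})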
# function to use in the fit
f = esponenziale
# prior parameters
muPrior = 130
nuPrior = 0
alphaPrior = .048
dataSample = numpy.genfromtxt("../data/DistrGrado_Wind",dtype='int')
sampleLength = len(dataSample)
# binning
bins=2000
# TODO how many bins?
# see how the fit accuracy
# varies with the bin size
# look for the optimal binning:
# not too few bins, otherwise details of the shape are lost
# not too many, otherwise per-bin statistics get low and noise is introduced
# look into optimal sampling with a discrete analogue
# of the Nyquist frequency for the bins
# consider the number of bins as a function of the number of available data points
binDelimiters = numpy.linspace(min(dataSample), max(dataSample), bins)
binSize = binDelimiters[1] - binDelimiters[0]
area = sampleLength*binSize
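# area = (number of samples) * (bin width): the scale factor that converts a normalized pdf into histogram counts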
priorParameters = [alphaPrior, muPrior, nuPrior]
# initialize the figure
matplotlib.pyplot.figure(figsize=(20,12))
# create prior curve
x = xFromBinDelimiters(binDelimiters)
yPrior = f(x, *priorParameters)
matplotlib.pyplot.plot(x, yPrior, label="prior")
# make the histogram of the generated sample
histogramResults = matplotlib.pyplot.hist(dataSample, bins=binDelimiters, histtype="step", label="generated", align="mid")
# matplotlib.pyplot.axvline(x=min(dataSample))
# matplotlib.pyplot.axvline(x=max(dataSample))
# generalized fit
values, covariance = fitHistogram(f, histogramResults, {"p0": priorParameters})
# TODO there's a problem in the gaussian fit documentation:
# muFit, sigmaFit = scipy.stats.norm.fit(dataSample) # area is missing
# plot fit results
print area
print values
print covariance
## create reconstructed curve
#yFit = f(x, *values)
#matplotlib.pyplot.plot(x, yFit, label="reconstructed")
matplotlib.pyplot.xlim(0, 200)
matplotlib.pyplot.ylim(0, 100)
# set the legend of the figure
matplotlib.pyplot.legend(loc='best', frameon=False)
# matplotlib.pyplot.show()
# matplotlib.pyplot.xscale('log')
# matplotlib.pyplot.yscale('log')
# the Gaussian is a parabola in the log-log plot
# In[ ]:
# In[ ]:
|
FedericoMuciaccia/SistemiComplessi
|
src/fit Poisson.py
|
Python
|
mit
| 3,890
|
[
"Gaussian"
] |
365ea39c6e0fb46b919c04c44a5db6dc743becdf700c8ac380fa22676e8670f5
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-add-resources
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Add resources from the BDII database for a given VO
"""
__RCSID__ = "$Id$"
import signal
import pprint
import re
from DIRAC.Core.Base import Script
def processScriptSwitches():
global vo, dry, doCEs, doSEs
Script.registerSwitch( "V:", "vo=", "Virtual Organization" )
Script.registerSwitch( "D", "dry", "Dry run" )
Script.registerSwitch( "C", "ce", "Process Computing Elements" )
Script.registerSwitch( "S", "se", "Process Storage Elements" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile]' % Script.scriptName ] ) )
Script.parseCommandLine( ignoreErrors = True )
vo = ''
dry = False
doCEs = False
doSEs = False
for sw in Script.getUnprocessedSwitches():
if sw[0] in ( "V", "vo" ):
vo = sw[1]
if sw[0] in ( "D", "dry" ):
dry = True
if sw[0] in ( "C", "ce" ):
doCEs = True
if sw[0] in ( "S", "se" ):
doSEs = True
from DIRAC import gLogger, exit as DIRACExit, S_OK
from DIRAC.ConfigurationSystem.Client.Utilities import getGridCEs, getSiteUpdates, getCEsFromCS, \
getGridSRMs, getSRMUpdates
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName, getDIRACSesForSRM
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.Core.Utilities.Grid import ldapService, getBdiiSEInfo
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs
ceBdiiDict = None
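# Cached BDII information about CEs; filled by checkUnusedCEs() and reused by updateSites().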
def checkUnusedCEs():
global vo, dry, ceBdiiDict
gLogger.notice( 'looking for new computing resources in the BDII database...' )
result = getCEsFromCS()
if not result['OK']:
gLogger.error( 'ERROR: failed to get CEs from CS', result['Message'] )
DIRACExit( -1 )
knownCEs = result['Value']
result = getGridCEs( vo, ceBlackList = knownCEs )
if not result['OK']:
gLogger.error( 'ERROR: failed to get CEs from BDII', result['Message'] )
DIRACExit( -1 )
ceBdiiDict = result['BdiiInfo']
siteDict = result['Value']
if siteDict:
gLogger.notice( 'New resources available:\n' )
for site in siteDict:
diracSite = 'Unknown'
result = getDIRACSiteName( site )
if result['OK']:
diracSite = ','.join( result['Value'] )
ces = siteDict[site].keys()
if ces:
gLogger.notice( " %s, DIRAC site %s" % ( site, diracSite) )
for ce in ces:
gLogger.notice( ' '*4+ce )
gLogger.notice( ' %s, %s' % ( siteDict[site][ce]['CEType'], '%s_%s_%s' % siteDict[site][ce]['System'] ) )
else:
gLogger.notice( 'No new resources available, exiting' )
DIRACExit( 0 )
inp = raw_input( "\nDo you want to add sites ? [default=yes] [yes|no]: ")
inp = inp.strip()
if inp.lower().startswith( 'n' ):
gLogger.notice( 'Nothing else to be done, exiting' )
DIRACExit( 0 )
gLogger.notice( '\nAdding new sites/CEs interactively\n' )
sitesAdded = []
for site in siteDict:
# Get the country code:
country = ''
ces = siteDict[site].keys()
for ce in ces:
country = ce.strip().split('.')[-1].lower()
if len( country ) == 2:
break
if country == 'gov':
country = 'us'
break
if not country or len( country ) != 2:
country = 'xx'
result = getDIRACSiteName( site )
if not result['OK']:
gLogger.notice( '\nThe site %s is not yet in the CS, give it a name' % site )
diracSite = raw_input( '[help|skip|<domain>.<name>.%s]: ' % country )
if diracSite.lower() == "skip":
continue
if diracSite.lower() == "help":
gLogger.notice( '%s site details:' % site )
for k,v in ceBdiiDict[site].items():
if k != "CEs":
gLogger.notice( '%s\t%s' % (k,v) )
gLogger.notice( '\nEnter DIRAC site name in the form <domain>.<name>.%s\n' % country )
diracSite = raw_input( '[<domain>.<name>.%s]: ' % country )
try:
domain,siteName,country = diracSite.split('.')
except Exception, x:
gLogger.error( 'ERROR: DIRAC site name does not follow convention: %s' % diracSite )
continue
diracSites = [diracSite]
else:
diracSites = result['Value']
if len( diracSites ) > 1:
gLogger.notice( 'Attention! GOC site %s corresponds to more than one DIRAC sites:' % site )
gLogger.notice( str( diracSites ) )
gLogger.notice( 'Please, pay attention which DIRAC site the new CEs will join\n' )
newCEs = {}
addedCEs = []
for ce in ces:
ceType = siteDict[site][ce]['CEType']
for diracSite in diracSites:
if ce in addedCEs:
continue
yn = raw_input( "Add CE %s of type %s to %s? [default yes] [yes|no]: " % ( ce, ceType, diracSite ) )
if yn == '' or yn.lower() == 'y':
newCEs.setdefault( diracSite, [] )
newCEs[diracSite].append( ce )
addedCEs.append( ce )
for diracSite in diracSites:
if diracSite in newCEs:
cmd = "dirac-admin-add-site %s %s %s diractest.cfg" % ( diracSite, site, ' '.join( newCEs[diracSite] ) )
gLogger.notice( "\nNew site/CEs will be added with command:\n%s" % cmd )
yn = raw_input( "Add it ? [default yes] [yes|no]: " )
if not ( yn == '' or yn.lower() == 'y' ) :
continue
if dry:
gLogger.notice( "Command is skipped in the dry run" )
else:
result = shellCall( 0, cmd )
if not result['OK']:
gLogger.error( 'Error while executing dirac-admin-add-site command' )
yn = raw_input( "Do you want to continue ? [default no] [yes|no]: " )
if yn == '' or yn.lower().startswith( 'n' ):
if sitesAdded:
gLogger.notice( 'CEs were added at the following sites:' )
for site, diracSite in sitesAdded:
gLogger.notice( "%s\t%s" % ( site, diracSite ) )
DIRACExit( 0 )
else:
exitStatus, stdData, errData = result[ 'Value' ]
if exitStatus:
gLogger.error( 'Error while executing dirac-admin-add-site command\n', '\n'.join( [stdData, errData] ) )
yn = raw_input( "Do you want to continue ? [default no] [yes|no]: " )
if yn == '' or yn.lower().startswith( 'n' ):
if sitesAdded:
gLogger.notice( 'CEs were added at the following sites:' )
for site, diracSite in sitesAdded:
gLogger.notice( "%s\t%s" % ( site, diracSite ) )
DIRACExit( 0 )
else:
sitesAdded.append( ( site, diracSite ) )
gLogger.notice( stdData )
gLogger.notice( 'CEs were added at the following sites:' )
for site, diracSite in sitesAdded:
gLogger.notice( "%s\t%s" % ( site, diracSite ) )
def updateCS( changeSet ):
global vo, dry, ceBdiiDict
changeList = list( changeSet )
changeList.sort()
if dry:
gLogger.notice( 'The following needed changes are detected:\n' )
else:
gLogger.notice( 'We are about to make the following changes to CS:\n' )
for entry in changeList:
gLogger.notice( "%s/%s %s -> %s" % entry )
if not dry:
csAPI = CSAPI()
csAPI.initialize()
result = csAPI.downloadCSData()
if not result['OK']:
gLogger.error( 'Failed to initialize CSAPI object', result['Message'] )
DIRACExit( -1 )
for section, option, value, new_value in changeSet:
if value == 'Unknown' or not value:
csAPI.setOption( cfgPath( section, option ), new_value )
else:
csAPI.modifyValue( cfgPath( section, option ), new_value )
yn = raw_input( 'Do you want to commit changes to CS ? [default yes] [yes|no]: ' )
if yn == '' or yn.lower().startswith( 'y' ):
result = csAPI.commit()
if not result['OK']:
gLogger.error( "Error while commit to CS", result['Message'] )
else:
gLogger.notice( "Successfully committed %d changes to CS" % len( changeSet ) )
def updateSites():
global vo, dry, ceBdiiDict
result = getSiteUpdates( vo, bdiiInfo = ceBdiiDict )
if not result['OK']:
gLogger.error( 'Failed to get site updates', result['Message'] )
DIRACExit( -1 )
changeSet = result['Value']
updateCS( changeSet )
def checkUnusedSEs():
global vo, dry
result = getGridSRMs( vo, unUsed = True )
if not result['OK']:
gLogger.error( 'Failed to look up SRMs in BDII', result['Message'] )
siteSRMDict = result['Value']
# Evaluate VOs
result = getVOs()
if result['OK']:
csVOs = set( result['Value'] )
else:
csVOs = set( [vo] )
changeSetFull = set()
for site in siteSRMDict:
for gridSE in siteSRMDict[site]:
changeSet = set()
seDict = siteSRMDict[site][gridSE]['SE']
srmDict = siteSRMDict[site][gridSE]['SRM']
# Check the SRM version
version = srmDict.get( 'GlueServiceVersion', '' )
if not ( version and version.startswith( '2' ) ):
gLogger.debug( 'Skipping SRM service with version %s' % version )
continue
result = getDIRACSiteName( site )
if not result['OK']:
gLogger.notice( 'Unused SE %s is detected at unused site %s' % ( gridSE, site ) )
gLogger.notice( 'Consider adding site %s to the DIRAC CS' % site )
continue
diracSites = result['Value']
yn = raw_input( '\nDo you want to add new SRM SE %s at site(s) %s ? default yes [yes|no]: ' % ( gridSE, str( diracSites ) ) )
if not yn or yn.lower().startswith( 'y' ):
if len( diracSites ) > 1:
prompt = 'Which DIRAC site the new SE should be attached to ?'
for i, s in enumerate( diracSites ):
prompt += '\n[%d] %s' % ( i, s )
prompt += '\nEnter your choice number: '
inp = raw_input( prompt )
try:
ind = int( inp )
except:
gLogger.notice( 'Can not interpret your choice: %s, try again later' % inp )
continue
diracSite = diracSites[ind]
else:
diracSite = diracSites[0]
domain, siteName, country = diracSite.split( '.' )
recName = '%s-disk' % siteName
inp = raw_input( 'Give a DIRAC name to the grid SE %s, default %s : ' % ( gridSE, recName ) )
diracSEName = inp
if not inp:
diracSEName = recName
gLogger.notice( 'Adding new SE %s at site %s' % ( diracSEName, diracSite ) )
seSection = cfgPath( '/Resources/StorageElements', diracSEName )
changeSet.add( ( seSection, 'BackendType', seDict.get( 'GlueSEImplementationName', 'Unknown' ) ) )
changeSet.add( ( seSection, 'Description', seDict.get( 'GlueSEName', 'Unknown' ) ) )
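# Advertise only the VOs that appear both in the CS and in the BDII access-control rules.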
bdiiVOs = set( [ re.sub( '^VO:', '', rule ) for rule in srmDict.get( 'GlueServiceAccessControlBaseRule', [] ) ] )
seVOs = csVOs.intersection( bdiiVOs )
changeSet.add( ( seSection, 'VO', ','.join( seVOs ) ) )
accessSection = cfgPath( seSection, 'AccessProtocol.1' )
changeSet.add( ( accessSection, 'Protocol', 'srm' ) )
changeSet.add( ( accessSection, 'ProtocolName', 'SRM2' ) )
endPoint = srmDict.get( 'GlueServiceEndpoint', '' )
result = pfnparse( endPoint )
if not result['OK']:
gLogger.error( 'Can not get the SRM service end point. Skipping ...' )
continue
host = result['Value']['Host']
port = result['Value']['Port']
changeSet.add( ( accessSection, 'Host', host ) )
changeSet.add( ( accessSection, 'Port', port ) )
changeSet.add( ( accessSection, 'Access', 'remote' ) )
# Try to guess the Path
domain = '.'.join( host.split( '.' )[-2:] )
path = '/dpm/%s/home' % domain
changeSet.add( ( accessSection, 'Path', path ) )
changeSet.add( ( accessSection, 'SpaceToken', '' ) )
changeSet.add( ( accessSection, 'WSUrl', '/srm/managerv2?SFN=' ) )
gLogger.notice( 'SE %s will be added with the following parameters' % diracSEName )
changeList = list( changeSet )
changeList.sort()
for entry in changeList:
gLogger.notice( entry )
yn = raw_input( 'Do you want to add new SE %s ? default yes [yes|no]: ' % diracSEName )
if not yn or yn.lower().startswith( 'y' ):
changeSetFull = changeSetFull.union( changeSet )
if dry:
gLogger.notice( 'Skipping commit of the new SE data in a dry run' )
return S_OK()
if changeSetFull:
csAPI = CSAPI()
csAPI.initialize()
result = csAPI.downloadCSData()
if not result['OK']:
gLogger.error( 'Failed to initialize CSAPI object', result['Message'] )
DIRACExit( -1 )
changeList = list( changeSetFull )
changeList.sort()
for section, option, value in changeList:
csAPI.setOption( cfgPath( section, option ), value )
yn = raw_input( 'New SE data has been accumulated.\nDo you want to commit the changes to the CS ? default yes [yes|no]: ' )
if not yn or yn.lower().startswith( 'y' ):
result = csAPI.commit()
if not result['OK']:
gLogger.error( "Error while commit to CS", result['Message'] )
else:
gLogger.notice( "Successfully committed %d changes to CS" % len( changeSetFull ) )
return S_OK()
def updateSEs():
global vo, dry
result = getSRMUpdates( vo )
if not result['OK']:
gLogger.error( 'Failed to get SRM updates', result['Message'] )
DIRACExit( -1 )
changeSet = result['Value']
updateCS( changeSet )
def handler( signum, frame ):
gLogger.notice( ' Exit is forced, bye...' )
DIRACExit( -1 )
if __name__ == "__main__":
signal.signal( signal.SIGTERM, handler )
signal.signal( signal.SIGINT, handler )
vo = ''
dry = False
doCEs = False
doSEs = False
ceBdiiDict = None
processScriptSwitches()
if not vo:
gLogger.error( 'No VO specified' )
DIRACExit( -1 )
if doCEs:
yn = raw_input( 'Do you want to check/add new sites to CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
checkUnusedCEs()
yn = raw_input( 'Do you want to update CE details in the CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
updateSites()
if doSEs:
yn = raw_input( 'Do you want to check/add new storage elements to CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
result = checkUnusedSEs()
yn = raw_input( 'Do you want to update SE details in the CS ? [default yes] [yes|no]: ' )
yn = yn.strip()
if yn == '' or yn.lower().startswith( 'y' ):
updateSEs()
|
rajanandakumar/DIRAC
|
ConfigurationSystem/scripts/dirac-admin-add-resources.py
|
Python
|
gpl-3.0
| 15,563
|
[
"DIRAC"
] |
ba0936e403820dda0866ce631ee0b3332b70119955ecb9b6978e497390d06a2a
|
import lb_loader
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
import pandas as pd
import mdtraj as md
import itertools
import numpy as np
from openmmtools.testsystems import build_lattice, generate_dummy_trajectory, LennardJonesFluid
# Build a lattice of 4 * 4**3 = 256 Lennard-Jones particles (4 per unit cell)
# and save a dummy trajectory so the starting configuration can be inspected.
nparticles = 4 * (4 ** 3)
xyz, box = build_lattice(nparticles)
traj = generate_dummy_trajectory(xyz, box)
traj.save("./out.pdb")
len(xyz)
testsystem = LennardJonesFluid(nparticles=nparticles, lattice=True)
system, positions = testsystem.system, testsystem.positions
temperature = 25*u.kelvin
# HMC integrator: 25 MD steps per Monte Carlo proposal, 1 fs timestep.
integrator = hmc_integrators.HMCIntegrator(temperature, steps_per_hmc=25, timestep=1.0*u.femtoseconds)
context = lb_loader.build(system, integrator, positions, temperature)
state = context.getState(getPositions=True, getParameters=True)
xyz = state.getPositions(asNumpy=True) / u.nanometer
state.getPeriodicBoxVectors()
# Potential energy of the initial lattice, after local minimization, and
# after 1 and then 100 further HMC steps.
context.getState(getEnergy=True).getPotentialEnergy()
mm.LocalEnergyMinimizer.minimize(context)
context.getState(getEnergy=True).getPotentialEnergy()
integrator.step(1)
context.getState(getEnergy=True).getPotentialEnergy()
integrator.step(100)
context.getState(getEnergy=True).getPotentialEnergy()
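# Rough expectation (not asserted by this script): the potential energy should
# drop sharply after LocalEnergyMinimizer runs, then fluctuate around a stable
# value as the HMC integrator samples the 25 K ensemble.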
|
kyleabeauchamp/HMCNotes
|
code/misc/test_lattice_fcp_testsystems.py
|
Python
|
gpl-2.0
| 1,252
|
[
"MDTraj",
"OpenMM"
] |
01ec7ec378f449da5afd81683c81536e57f7f100f7886fd3260474fcb7e78bc5
|
""" Test_RSS_Policy_Configurations
"""
__RCSID__ = '$Id: $'
import unittest
import DIRAC.ResourceStatusSystem.Policy.Configurations as moduleTested
################################################################################
class Configurations_TestCase( unittest.TestCase ):
def setUp( self ):
"""
Setup
"""
self.moduleTested = moduleTested
def tearDown( self ):
"""
TearDown
"""
del self.moduleTested
################################################################################
# Tests
class Configurations_Success( Configurations_TestCase ):
def test_policiesMeta( self ):
""" tests that the configuration does not have any funny key
"""
self.assertEqual( True, hasattr( self.moduleTested, 'POLICIESMETA' ) )
policiesMeta = self.moduleTested.POLICIESMETA
for _policyName, policyMeta in policiesMeta.items():
self.assertEqual( [ 'args', 'command', 'description', 'module' ], sorted( policyMeta.keys() ) )
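# For reference, a well-formed POLICIESMETA entry looks roughly like this
# (names and values are illustrative, not taken from the module under test):
# 'AlwaysActive' : { 'args' : None,
#                    'command' : ( 'Command', 'Command' ),
#                    'description' : 'Policy that is always active',
#                    'module' : 'AlwaysActivePolicy' }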
################################################################################
################################################################################
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( Configurations_TestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( Configurations_Success ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
ResourceStatusSystem/Policy/test/Test_RSS_Policy_Configurations.py
|
Python
|
gpl-3.0
| 1,509
|
[
"DIRAC"
] |
b40dcafadb1b61caf2f86ef0af4d59f9b9ed1421d2c76b6fc0c9b619b214e706
|
import matplotlib.pyplot
import scipy
import colorsys
import json
import sys
import os
import copy
matplotlib.pyplot.figure(1, figsize=(14,14))
fname = os.path.splitext(os.path.basename(sys.argv[1]))[0]
fin = open(sys.argv[1], "r")
jin = json.load(fin)
fin.close()
# 4 timer values are: total, min, max, average
timer_groups = [
[
"sirius::Global::generate_radial_functions",
"sirius::Global::generate_radial_integrals",
"sirius::K_set::find_eigen_states",
"sirius::Density::generate",
"sirius::Potential::generate_effective_potential",
"exciting::sym_rho_mag",
"exciting::mixer"
],
[
"sirius::Band::set_fv_h_o",
"sirius::Band::solve_fv_evp",
"sirius::K_point::generate_fv_states",
"sirius::Band::solve_sv",
"sirius::K_point::generate_spinor_wave_functions"
],
[
"sirius::Potential::poisson",
"sirius::Potential::xc"
],
[
"sirius::Reciprocal_lattice::init",
"sirius::Step_function::init",
"sirius::Unit_cell::get_symmetry",
"sirius::Unit_cell::find_nearest_neighbours",
"sirius::K_point::initialize",
"sirius::Potential::Potential",
"sirius::Atom_type::solve_free_atom"
]
]
for itg in range(len(timer_groups)):
timer_names = []
timer_values = []
total_time = 0.0
for timer_name in timer_groups[itg]:
if timer_name in jin["timers"]:
tname = timer_name
# effective potential is generated once before the scf loop
# the first timer is reported in percentage
if itg == 0:
if timer_name == "sirius::Potential::generate_effective_potential":
# (total - average) of effective potential / total of iterations
t = (jin["timers"][timer_name][0] - jin["timers"][timer_name][3]) / jin["timers"]["exciting::iteration"][0]
else:
t = jin["timers"][timer_name][0] / jin["timers"]["exciting::iteration"][0]
t = t * 100
# show average time in legend
timer_names.append(tname + " (%6.2f%%, %6.2f sec./call)"%(t, jin["timers"][timer_name][3]))
# show total time for initialization routines
elif itg == 3:
t = jin["timers"][timer_name][0]
timer_names.append(tname + " (%6.2f sec.)"%t)
# show average time
else:
t = jin["timers"][timer_name][3]
timer_names.append(tname + " (%6.2f sec./call)"%t)
timer_values.append(t)
total_time += t
print "total time for timer group ", itg, " ", total_time
plot = matplotlib.pyplot.subplot("41%i"%(itg+1))
box = plot.get_position()
plot.set_position([box.x0, box.y0, box.width * 0.1, box.height])
box = plot.get_position()
ytics = [0]
for i in range(len(timer_values)):
ytics.append(ytics[i] + timer_values[i])
plots = []
for i in range(len(timer_values)):
rgb = colorsys.hsv_to_rgb(i / float(len(timer_values)), 0.75, 0.95)
c = "#%X%X%X"%(rgb[0]*255, rgb[1]*255, rgb[2]*255)
plots.append(matplotlib.pyplot.bar(0, timer_values[i], 2, bottom=ytics[i], color=c))
matplotlib.pyplot.xticks([], ())
matplotlib.pyplot.yticks(ytics)
matplotlib.pyplot.ylim([0, ytics[len(ytics)-1]])
matplotlib.pyplot.legend(plots[::-1], timer_names[::-1], bbox_to_anchor=(1.2, 1), loc=2)
matplotlib.pyplot.savefig(fname+".pdf", format="pdf")
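# Usage (sketch): python timers2.py timers.json
# reads the JSON timer dump written by a SIRIUS/exciting run and saves the
# stacked-bar breakdown of the four timer groups to timers.pdf.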
|
electronic-structure/sirius
|
apps/timers/timers2.py
|
Python
|
bsd-2-clause
| 3,540
|
[
"exciting"
] |
71582e569f3cf473898a7601e7608a215e85f1d01e1b802cf35404e7de13dd5f
|
import tweepy
import webbrowser
from ConfigParser import SafeConfigParser
# Call this function if you need to get access tokens
def getAccessTokens():
# Initialize the config parser and open the config.ini file
config = SafeConfigParser()
config.read('config.ini')
# Try to load the consumer keys. These tell Twitter which app we are.
# We need these before we request access to a user's account.
try:
consumerKey = config.get('auth', 'consumerKey')
consumerSecret = config.get('auth', 'consumerSecret')
except:
print "Error! Could not find consumer key or consumer secret"
return
# If the consumer keys are blank, ask the user to add them.
if consumerKey == '' or consumerSecret == '':
print "Error! Consumer keys are blank. Please add them to config.ini."
return
# Use the consumer keys to request a verifier PIN from Twitter.
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
try:
redirect_url = auth.get_authorization_url()
print redirect_url
webbrowser.open(redirect_url)
except tweepy.TweepError:
print 'Error! Failed getting request token from Twitter.'
# Ask the user to input the PIN that Twitter provides
verifier = raw_input('Visit the URL above and enter the Twitter PIN:')
# Try to get access tokens using the Twitter PIN
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print 'Error! Failed to get access token.'
# Save the access keys. These are blank if we failed to get them.
config = SafeConfigParser()
config.read('config.ini')
config.set('auth', 'accessKey', auth.access_token)
config.set('auth', 'accessSecret', auth.access_token_secret)
with open('config.ini', 'w') as f:
config.write(f)
# Call this function to post a tweet
def postTweet(tweetText):
# Open the config file
config = SafeConfigParser()
config.read('config.ini')
# Try to get the consumer keys. If we can't, add blank entries to the config
try:
consumerKey = config.get('auth', 'consumerKey')
consumerSecret = config.get('auth', 'consumerSecret')
except:
print "Error! config.ini must contain the consumer key and consumer secret."
config.add_section('auth')
config.set('auth', 'consumerKey', '')
config.set('auth', 'consumerSecret', '')
with open('config.ini', 'w') as f:
config.write(f)
# Now try to get the access keys. If we can't, try to get them.
try:
accessKey = config.get('auth', 'accessKey')
accessSecret = config.get('auth', 'accessSecret')
except:
getAccessTokens()
config.read('config.ini')
accessKey = config.get('auth', 'accessKey')
accessSecret = config.get('auth', 'accessSecret')
# Connect to the Twitter API
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessKey, accessSecret)
api = tweepy.API(auth)
# Post the Tweet
try:
api.update_status(tweetText)
except tweepy.TweepError:
print 'Error! Failed to post status update.'
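# Example usage (illustrative; assumes config.ini already contains valid
# consumer keys, and access keys will be requested on first use):
# postTweet('Hello from topStocks!')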
|
trswany/topStocks
|
tweetPoster.py
|
Python
|
mit
| 3,159
|
[
"VisIt"
] |
f7fb9a8c11641ed0dac35cb3eeea80981ff6357186d0697a29caaffc99a09c35
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import BaseHTTPServer
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import webbrowser
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_PERFORCE.lower(): VCS_PERFORCE,
"p4": VCS_PERFORCE,
VCS_GIT.lower(): VCS_GIT,
VCS_CVS.lower(): VCS_CVS,
}
# OAuth 2.0-Related Constants
LOCALHOST_IP = '127.0.0.1'
DEFAULT_OAUTH2_PORT = 8001
ACCESS_TOKEN_PARAM = 'access_token'
OAUTH_PATH = '/get-access-token'
OAUTH_PATH_PORT_TEMPLATE = OAUTH_PATH + '?port=%(port)d'
AUTH_HANDLER_RESPONSE = """\
<html>
<head>
<title>Authentication Status</title>
</head>
<body>
<p>The authentication flow has completed.</p>
</body>
</html>
"""
# Borrowed from google-api-python-client
OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Your browser has been opened to visit:
%s
If your browser is on a different machine then exit and re-run
upload.py with the command-line parameter
--no_oauth2_webbrowser
"""
NO_OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Go to the following link in your browser:
%s
and copy the access token.
"""
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self._reason = args["Error"]
self.info = args.get("Info", None)
@property
def reason(self):
# reason is a property on python 2.7 but a member variable on <=2.6.
# self.args is modified so it cannot be used as-is so save the value in
# self._reason.
return self._reason
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None,
extra_headers=None, save_cookies=False,
account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new AbstractRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers or {}
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data, headers={"Accept": "text/plain"})
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
print >>sys.stderr, ''
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >>sys.stderr, (
"Use an application-specific password instead "
"of your regular account password.\n"
"See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
else:
print >>sys.stderr, "Invalid username or password."
elif e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
elif e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
elif e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
elif e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
elif e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
elif e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
elif e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
else:
# Unknown error.
raise
print >>sys.stderr, ''
continue
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
elif e.code == 301:
# Handle permanent redirect manually.
url = e.info()["location"]
url_loc = urlparse.urlparse(url)
self.host = '%s://%s' % (url_loc[0], url_loc[1])
elif e.code >= 500:
ErrorExit(e.read())
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if isinstance(self.auth_function, OAuth2Creds):
access_token = self.auth_function()
if access_token is not None:
self.extra_headers['Authorization'] = 'OAuth %s' % (access_token,)
self.authenticated = True
else:
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class CondensedHelpFormatter(optparse.IndentedHelpFormatter):
"""Frees more horizontal space by removing indentation from group
options and collapsing arguments between short and long, e.g.
'-o ARG, --opt=ARG' to -o --opt ARG"""
def format_heading(self, heading):
return "%s:\n" % heading
def format_option(self, option):
self.dedent()
res = optparse.HelpFormatter.format_option(self, option)
self.indent()
return res
def format_option_strings(self, option):
self.set_long_opt_delimiter(" ")
optstr = optparse.HelpFormatter.format_option_strings(self, option)
optlist = optstr.split(", ")
if len(optlist) > 1:
if option.takes_value():
# strip METAVAR from all but the last option
optlist = [x.split()[0] for x in optlist[:-1]] + optlist[-1:]
optstr = " ".join(optlist)
return optstr
parser = optparse.OptionParser(
usage=("%prog [options] [-- diff_options] [path...]\n"
"See also: http://code.google.com/p/rietveld/wiki/UploadPyUsage"),
add_help_option=False,
formatter=CondensedHelpFormatter()
)
parser.add_option("-h", "--help", action="store_true",
help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--oauth2", action="store_true",
dest="use_oauth2", default=False,
help="Use OAuth 2.0 instead of a password.")
group.add_option("--oauth2_port", action="store", type="int",
dest="oauth2_port", default=DEFAULT_OAUTH2_PORT,
help=("Port to use to handle OAuth 2.0 redirect. Must be an "
"integer in the range 1024-49151, defaults to "
"'%default'."))
group.add_option("--no_oauth2_webbrowser", action="store_false",
dest="open_oauth2_local_webbrowser", default=True,
help="Don't open a browser window to get an access token.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-t", "--title", action="store", dest="title",
help="New issue subject or new patch set title")
group.add_option("-m", "--message", action="store", dest="message",
default=None,
help="New issue description or new patch set message")
group.add_option("-F", "--file", action="store", dest="file",
default=None, help="Read the message above from file.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base URL path for files (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
dest="send_patch", default=False,
help="Same as --send_mail, but include diff as an "
"attachment, and prepend email subject with 'PATCH:'.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Version control system (optional, usually upload.py "
"already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
# Git-specific
group = parser.add_option_group("Git-specific options")
group.add_option("--git_similarity", action="store", dest="git_similarity",
metavar="SIM", type="int", default=50,
help=("Set the minimum similarity index for detecting renames "
"and copies. See `git diff -C`. (default 50)."))
group.add_option("--git_no_find_copies", action="store_false", default=True,
dest="git_find_copies",
help=("Prevents git from looking for copies (default off)."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
"(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
metavar="P4_PORT", default=None,
help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
metavar="P4_CHANGELIST", default=None,
help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
metavar="P4_CLIENT", default=None,
help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
metavar="P4_USER", default=None,
help=("Perforce user"))
# OAuth 2.0 Methods and Helpers
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters for an access token
and then stops serving.
"""
access_token = None
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters into the server's
access_token and then stops serving.
"""
def SetAccessToken(self):
"""Stores the access token from the request on the server.
Will only do this if exactly one query parameter was passed in to the
request and that query parameter used 'access_token' as the key.
"""
query_string = urlparse.urlparse(self.path).query
query_params = urlparse.parse_qs(query_string)
if len(query_params) == 1:
access_token_list = query_params.get(ACCESS_TOKEN_PARAM, [])
if len(access_token_list) == 1:
self.server.access_token = access_token_list[0]
def do_GET(self):
"""Handle a GET request.
Parses and saves the query parameters and prints a message that the server
has completed its lone task (handling a redirect).
Note that we can't detect if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.SetAccessToken()
self.wfile.write(AUTH_HANDLER_RESPONSE)
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
def OpenOAuth2ConsentPage(server=DEFAULT_REVIEW_SERVER,
port=DEFAULT_OAUTH2_PORT):
"""Opens the OAuth 2.0 consent page or prints instructions how to.
Uses the webbrowser module to open the OAuth server side page in a browser.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
"""
path = OAUTH_PATH_PORT_TEMPLATE % {'port': port}
parsed_url = urlparse.urlparse(server)
scheme = parsed_url[0] or 'https'
if scheme != 'https':
ErrorExit('Using OAuth requires a review server with SSL enabled.')
# If no scheme was given on command line the server address ends up in
# parsed_url.path otherwise in netloc.
host = parsed_url[1] or parsed_url[2]
page = '%s://%s%s' % (scheme, host, path)
webbrowser.open(page, new=1, autoraise=True)
print OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
def WaitForAccessToken(port=DEFAULT_OAUTH2_PORT):
"""Spins up a simple HTTP Server to handle a single request.
Intended to handle a single redirect from the production server after the
user authenticated via OAuth 2.0 with the server.
Args:
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
The access token passed to the localhost server, or None if no access token
was passed.
"""
httpd = ClientRedirectServer((LOCALHOST_IP, port), ClientRedirectHandler)
# Wait to serve just one request before deferring control back
# to the caller of WaitForAccessToken.
httpd.handle_request()
return httpd.access_token
def GetAccessToken(server=DEFAULT_REVIEW_SERVER, port=DEFAULT_OAUTH2_PORT,
open_local_webbrowser=True):
"""Gets an Access Token for the current user.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_local_webbrowser: Boolean, defaults to True. If set, opens a page in
the user's browser.
Returns:
A string access token that was sent to the local server. If the serving page
via WaitForAccessToken does not receive an access token, this method
returns None.
"""
access_token = None
if open_local_webbrowser:
OpenOAuth2ConsentPage(server=server, port=port)
try:
access_token = WaitForAccessToken(port=port)
except socket.error, e:
print 'Can\'t start local webserver. Socket Error: %s\n' % (e.strerror,)
if access_token is None:
# TODO(dhermes): Offer to add to clipboard using xsel, xclip, pbcopy, etc.
page = 'https://%s%s' % (server, OAUTH_PATH)
print NO_OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
access_token = raw_input('Enter access token: ').strip()
return access_token
class KeyringCreds(object):
def __init__(self, server, host, email):
self.server = server
# Explicitly cast host to str to work around bug in old versions of Keyring
# (versions before 0.10). Even though newer versions of Keyring fix this,
# some modern linuxes (such as Ubuntu 12.04) still bundle a version with
# the bug.
self.host = str(host)
self.email = email
self.accounts_seen = set()
def GetUserCredentials(self):
"""Prompts the user for a username and password.
Only use keyring on the initial call. If the keyring contains the wrong
password, we want to give the user a chance to enter another one.
"""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules; keyring is declared global so it can be disabled on error.
global keyring
email = self.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % self.server)
password = None
if keyring and not email in self.accounts_seen:
try:
password = keyring.get_password(self.host, email)
except:
# Sadly, we have to trap all errors here as
# gnomekeyring.IOError inherits from object. :/
print "Failed to get password from keyring"
keyring = None
if password is not None:
print "Using password from system keyring."
self.accounts_seen.add(email)
else:
password = getpass.getpass("Password for %s: " % email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(self.host, email, password)
self.accounts_seen.add(email)
return (email, password)
class OAuth2Creds(object):
"""Simple object to hold server and port to be passed to GetAccessToken."""
def __init__(self, server, port, open_local_webbrowser=True):
self.server = server
self.port = port
self.open_local_webbrowser = open_local_webbrowser
def __call__(self):
"""Uses stored server and port to retrieve OAuth 2.0 access token."""
return GetAccessToken(server=self.server, port=self.port,
open_local_webbrowser=self.open_local_webbrowser)
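# Wiring sketch: GetRpcServer (below) passes an OAuth2Creds instance as the
# auth_function, and HttpRpcServer._Authenticate detects that case and calls
# it to fetch an OAuth token instead of prompting for a password.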
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE, use_oauth2=False,
oauth2_port=DEFAULT_OAUTH2_PORT,
open_oauth2_local_webbrowser=True):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
use_oauth2: Boolean indicating whether OAuth 2.0 should be used for
authentication.
oauth2_port: Integer, the port where the localhost server receiving the
redirect is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_oauth2_local_webbrowser: Boolean, defaults to True. If True and using
OAuth, this opens a page in the user's browser to obtain a token.
Returns:
A new HttpRpcServer, on which RPC calls can be made.
"""
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = HttpRpcServer(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
positional_args = [server]
if use_oauth2:
positional_args.append(
OAuth2Creds(server, oauth2_port, open_oauth2_local_webbrowser))
else:
positional_args.append(KeyringCreds(server, host, email).GetUserCredentials)
return HttpRpcServer(*positional_args,
host_override=host_override,
save_cookies=save_cookies,
account_type=account_type)
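# Example (hypothetical host, email, and endpoint path):
#   rpc_server = GetRpcServer("codereview.example.com", email="me@example.com")
#   response = rpc_server.Send("/api/12345", payload=None)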
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
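# Shape of the encoded body (sketch) for fields=[("subject", "Fix")] and
# files=[("data", "a.py", "print 1\n")], with lines joined by CRLF:
#   --M-A-G-I-C---B-O-U-N-D-A-R-Y-
#   Content-Disposition: form-data; name="subject"
#
#   Fix
#   --M-A-G-I-C---B-O-U-N-D-A-R-Y-
#   Content-Disposition: form-data; name="data"; filename="a.py"
#   Content-Type: text/x-python
#
#   print 1
#   --M-A-G-I-C---B-O-U-N-D-A-R-Y---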
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout, stderr and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (stdout, stderr, return code)
"""
logging.info("Running %s", command)
env = env.copy()
env['LC_MESSAGES'] = 'C'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, errout, p.returncode
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code."""
out, err, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
universal_newlines, env)
return out, retcode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
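# Example: RunShell(["svn", "info"]) returns the command's stdout as a string;
# it exits via ErrorExit on a nonzero return code, or on empty output unless
# silent_ok=True is passed.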
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GetGUID(self):
"""Return string to distinguish the repository from others, for example to
query all opened review issues for it"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinaryData(self, data):
"""Returns true if data contains a null byte."""
# Derived from Mercurial's heuristic; see
# http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
return bool(data and "\0" in data)
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GetGUID(self):
return self._GetInfo("Repository UUID")
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns base URL for current diff.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
url = self._GetInfo("URL")
if url:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
guess = ""
# TODO(anatoli) - repository specific hacks should be handled by server
if netloc == "svn.python.org" and scheme == "svn+ssh":
path = "projects" + path
scheme = "http"
guess = "Python "
elif netloc.endswith(".googlecode.com"):
scheme = "http"
guess = "Google Code "
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed %sbase = %s", guess, base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def _GetInfo(self, key):
"""Parses 'svn info' for current dir. Returns value for key or None"""
for line in RunShell(["svn", "info"]).splitlines():
if line.startswith(key + ": "):
return line.split(":", 1)[1].strip()
def _EscapeFilename(self, filename):
"""Escapes filename for SVN commands."""
if "@" in filename and not filename.endswith("@"):
filename = "%s@" % filename
return filename
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
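# Example: with keyword_str "Revision", the expanded text "$Rev: 1234 $"
# collapses back to "$Rev$", matching the untranslated form that svn diff
# produces, so patch chunks line up again.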
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals",
self._EscapeFilename(filename)])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start,
self._EscapeFilename(dirname) or "."]
out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
if returncode:
# Directory might not yet exist at start revision
# svn: Unable to find repository location for 'abc' in revision nnn
if re.match('^svn: Unable to find repository location for .+ in revision \d+', err):
old_files = ()
else:
ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
else:
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [self._EscapeFilename(dirname) or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type",
self._EscapeFilename(filename)], silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary:
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
# filename must not be escaped. We already append an at sign ('@') here.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
else:
mimetype = mimetype.strip()
get_base = False
# this test for binary is exactly the test prescribed by the
# official SVN docs at
# http://subversion.apache.org/faq.html#binary-files
is_binary = (bool(mimetype) and
not mimetype.startswith("text/") and
mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", self._EscapeFilename(filename)],
universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
ErrorExit("Got error status from 'svn cat %s'" % filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
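# Illustrative sketch (not part of the original script): GetBaseFile above
# fetches historical content with a peg revision because "svn cat -r REV"
# fails for deleted files. The URL, path, and revision below are made-up
# placeholder values.
def _example_svn_peg_url(svn_base, filename, rev):
  """Build the "URL@REV" form that "svn cat" requires (sketch only)."""
  return "%s/%s@%s" % (svn_base, filename, rev)
# _example_svn_peg_url("http://example.com/repo", "trunk/a.txt", "1234")
# -> "http://example.com/repo/trunk/a.txt@1234"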
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GetGUID(self):
revlist = RunShell("git rev-list --parents HEAD".split()).splitlines()
# M-A: Return the 1st root hash, there could be multiple when a
# subtree is merged. In that case, more analysis would need to
# be done to figure out which HEAD is the 'most representative'.
for r in revlist:
if ' ' not in r:
return r
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
# Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
if ":" in self.options.revision:
extra_args = self.options.revision.split(":", 1) + extra_args
else:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if "GIT_EXTERNAL_DIFF" in env:
del env["GIT_EXTERNAL_DIFF"]
# -M/-C will not print the diff for the deleted file when a file is renamed.
# This is confusing because the original file will not be shown on the
# review when a file is renamed. So, get a diff with ONLY deletes, then
# append a diff (with rename detection), without deletes.
cmd = [
"git", "diff", "--no-color", "--no-ext-diff", "--full-index",
"--ignore-submodules",
]
diff = RunShell(
cmd + ["--no-renames", "--diff-filter=D"] + extra_args,
env=env, silent_ok=True)
if self.options.git_find_copies:
similarity_options = ["--find-copies-harder", "-l100000",
"-C%s" % self.options.git_similarity ]
else:
similarity_options = ["-M%s" % self.options.git_similarity ]
diff += RunShell(
cmd + ["--diff-filter=AMCRT"] + similarity_options + extra_args,
env=env, silent_ok=True)
# The CL might contain only deletions, or none at all, so accept an empty
# (silent) diff from either command and check the combined diff manually.
if not diff:
ErrorExit("No output from %s" % (cmd + extra_args))
return diff
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash, is_binary):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=not is_binary)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(
["git", "show", "HEAD:" + filename], silent_ok=True)
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
is_image = self.IsImage(filename)
is_binary = self.IsBinaryData(base_content) or is_image
# Grab the before/after content if we need it.
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before, is_binary)
# Only include the "after" file if it's an image; otherwise it
# it is reconstructed from the diff.
if is_image and hash_after:
new_content = self.GetFileContent(hash_after, is_binary)
return (base_content, new_content, is_binary, status)
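# Illustrative sketch (not part of the original script): the hash extraction
# that PostProcessDiff performs on a git diff "index" line; relies on the "re"
# module already imported by this file. The hashes are made up.
def _example_parse_git_index_line():
  match = re.match(r"index (\w+)\.\.(\w+)", "index 82c0d44..b2cee3f 100755")
  return match.group(1), match.group(2)  # -> ("82c0d44", "b2cee3f")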
class CVSVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for CVS."""
def __init__(self, options):
super(CVSVCS, self).__init__(options)
def GetGUID(self):
"""For now we don't know how to get repository ID for CVS"""
return
def GetOriginalContent_(self, filename):
RunShell(["cvs", "up", filename], silent_ok=True)
# TODO need detect file content encoding
content = open(filename).read()
return content.replace("\r\n", "\n")
def GetBaseFile(self, filename):
base_content = None
new_content = None
status = "A"
output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
if retcode:
ErrorExit("Got error status from 'cvs status %s'" % filename)
if output.find("Status: Locally Modified") != -1:
status = "M"
temp_filename = "%s.tmp123" % filename
os.rename(filename, temp_filename)
base_content = self.GetOriginalContent_(filename)
os.rename(temp_filename, filename)
elif output.find("Status: Locally Added"):
status = "A"
base_content = ""
elif output.find("Status: Needs Checkout"):
status = "D"
base_content = self.GetOriginalContent_(filename)
return (base_content, new_content, self.IsBinaryData(base_content), status)
def GenerateDiff(self, extra_args):
cmd = ["cvs", "diff", "-u", "-N"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(extra_args)
data, retcode = RunShellWithReturnCode(cmd)
count = 0
if retcode in [0, 1]:
for line in data.splitlines():
if line.startswith("Index:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from cvs diff")
return data
def GetUnknownFiles(self):
data, retcode = RunShellWithReturnCode(["cvs", "diff"])
if retcode not in [0, 1]:
ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
unknown_files = []
for line in data.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
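# Note (sketch, not from the original script): str.find returns -1 when the
# substring is absent, and -1 is truthy, so the status checks in
# CVSVCS.GetBaseFile must compare against -1 explicitly:
#   "Status: Up-to-date".find("Locally Added")        # -1, would pass "if"
#   "Status: Up-to-date".find("Locally Added") != -1  # False, correct test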
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def GetGUID(self):
# See chapter "Uniquely identifying a repository"
# http://hgbook.red-bean.com/read/customizing-the-output-of-mercurial.html
info = RunShell("hg log -r0 --template {node}".split())
return info.strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
absname = os.path.join(self.repo_dir, filename)
return os.path.relpath(absname)
def GenerateDiff(self, extra_args):
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify the line to make it look as if it came from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir,
# but "hg diff" has given us the path relative to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
status, _ = out[0].split(' ', 1)
if len(out) > 1 and status == "A":
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = self.IsBinaryData(base_content)
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or self.IsBinaryData(new_content)
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary:
new_content = None
return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Perforce."""
def __init__(self, options):
def ConfirmLogin():
# Make sure we have a valid perforce session
while True:
data, retcode = self.RunPerforceCommandWithReturnCode(
["login", "-s"], marshal_output=True)
if not data:
ErrorExit("Error checking perforce login")
if not retcode and (not "code" in data or data["code"] != "error"):
break
print "Enter perforce password: "
self.RunPerforceCommandWithReturnCode(["login"])
super(PerforceVCS, self).__init__(options)
self.p4_changelist = options.p4_changelist
if not self.p4_changelist:
ErrorExit("A changelist id is required")
if (options.revision):
ErrorExit("--rev is not supported for perforce")
self.p4_port = options.p4_port
self.p4_client = options.p4_client
self.p4_user = options.p4_user
ConfirmLogin()
if not options.title:
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
if description and "desc" in description:
# Rietveld doesn't support multi-line descriptions
raw_title = description["desc"].strip()
lines = raw_title.splitlines()
if len(lines):
options.title = lines[0]
def GetGUID(self):
"""For now we don't know how to get repository ID for Perforce"""
return
def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
universal_newlines=True):
args = ["p4"]
if marshal_output:
# -G makes perforce format its output as marshalled python objects
args.extend(["-G"])
if self.p4_port:
args.extend(["-p", self.p4_port])
if self.p4_client:
args.extend(["-c", self.p4_client])
if self.p4_user:
args.extend(["-u", self.p4_user])
args.extend(extra_args)
data, retcode = RunShellWithReturnCode(
args, print_output=False, universal_newlines=universal_newlines)
if marshal_output and data:
data = marshal.loads(data)
return data, retcode
def RunPerforceCommand(self, extra_args, marshal_output=False,
universal_newlines=True):
# This might be a good place to cache call results, since things like
# describe or fstat might get called repeatedly.
data, retcode = self.RunPerforceCommandWithReturnCode(
extra_args, marshal_output, universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
return data
def GetFileProperties(self, property_key_prefix = "", command = "describe"):
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
changed_files = {}
file_index = 0
# Try depotFile0, depotFile1, ... until we don't find a match
while True:
file_key = "depotFile%d" % file_index
if file_key in description:
filename = description[file_key]
change_type = description[property_key_prefix + str(file_index)]
changed_files[filename] = change_type
file_index += 1
else:
break
return changed_files
def GetChangedFiles(self):
return self.GetFileProperties("action")
def GetUnknownFiles(self):
# Perforce doesn't detect new files, they have to be explicitly added
return []
def IsBaseBinary(self, filename):
base_filename = self.GetBaseFilename(filename)
return self.IsBinaryHelper(base_filename, "files")
def IsPendingBinary(self, filename):
return self.IsBinaryHelper(filename, "describe")
def IsBinaryHelper(self, filename, command):
file_types = self.GetFileProperties("type", command)
if not filename in file_types:
ErrorExit("Trying to check binary status of unknown file %s." % filename)
# This treats symlinks, macintosh resource files, temporary objects, and
# unicode as binary. See the Perforce docs for more details:
# http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
return not file_types[filename].endswith("text")
def GetFileContent(self, filename, revision, is_binary):
file_arg = filename
if revision:
file_arg += "#" + revision
# -q suppresses the initial line that displays the filename and revision
return self.RunPerforceCommand(["print", "-q", file_arg],
universal_newlines=not is_binary)
def GetBaseFilename(self, filename):
actionsWithDifferentBases = [
"move/add", # p4 move
"branch", # p4 integrate (to a new file), similar to hg "add"
"add", # p4 integrate (to a new file), after modifying the new file
]
# We only see a different base for "add" if this is a downgraded branch
# after a file was branched (integrated), then edited.
if self.GetAction(filename) in actionsWithDifferentBases:
# -Or shows information about pending integrations/moves
fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
marshal_output=True)
baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
if baseFileKey in fstat_result:
return fstat_result[baseFileKey]
return filename
def GetBaseRevision(self, filename):
base_filename = self.GetBaseFilename(filename)
have_result = self.RunPerforceCommand(["have", base_filename],
marshal_output=True)
if "haveRev" in have_result:
return have_result["haveRev"]
def GetLocalFilename(self, filename):
where = self.RunPerforceCommand(["where", filename], marshal_output=True)
if "path" in where:
return where["path"]
def GenerateDiff(self, args):
class DiffData:
def __init__(self, perforceVCS, filename, action):
self.perforceVCS = perforceVCS
self.filename = filename
self.action = action
self.base_filename = perforceVCS.GetBaseFilename(filename)
self.file_body = None
self.base_rev = None
self.prefix = None
self.working_copy = True
self.change_summary = None
def GenerateDiffHeader(diffData):
header = []
header.append("Index: %s" % diffData.filename)
header.append("=" * 67)
if diffData.base_filename != diffData.filename:
if diffData.action.startswith("move"):
verb = "rename"
else:
verb = "copy"
header.append("%s from %s" % (verb, diffData.base_filename))
header.append("%s to %s" % (verb, diffData.filename))
suffix = "\t(revision %s)" % diffData.base_rev
header.append("--- " + diffData.base_filename + suffix)
if diffData.working_copy:
suffix = "\t(working copy)"
header.append("+++ " + diffData.filename + suffix)
if diffData.change_summary:
header.append(diffData.change_summary)
return header
def GenerateMergeDiff(diffData, args):
# -du generates a unified diff, which is nearly svn format
diffData.file_body = self.RunPerforceCommand(
["diff", "-du", diffData.filename] + args)
diffData.base_rev = self.GetBaseRevision(diffData.filename)
diffData.prefix = ""
# We have to replace p4's file status output (the lines starting
# with +++ or ---) to match svn's diff format
lines = diffData.file_body.splitlines()
first_good_line = 0
while (first_good_line < len(lines) and
not lines[first_good_line].startswith("@@")):
first_good_line += 1
diffData.file_body = "\n".join(lines[first_good_line:])
return diffData
def GenerateAddDiff(diffData):
fstat = self.RunPerforceCommand(["fstat", diffData.filename],
marshal_output=True)
if "headRev" in fstat:
diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
else:
diffData.base_rev = "0" # Brand new file
diffData.working_copy = False
rel_path = self.GetLocalFilename(diffData.filename)
diffData.file_body = open(rel_path, 'r').read()
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -0,0 +1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " @@"
diffData.prefix = "+"
return diffData
def GenerateDeleteDiff(diffData):
diffData.base_rev = self.GetBaseRevision(diffData.filename)
is_base_binary = self.IsBaseBinary(diffData.filename)
# For deletes, base_filename == filename
diffData.file_body = self.GetFileContent(diffData.base_filename,
None,
is_base_binary)
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " +0,0 @@"
diffData.prefix = "-"
return diffData
changed_files = self.GetChangedFiles()
svndiff = []
filecount = 0
for (filename, action) in changed_files.items():
svn_status = self.PerforceActionToSvnStatus(action)
if svn_status == "SKIP":
continue
diffData = DiffData(self, filename, action)
# Is it possible to diff a branched file? Stackoverflow says no:
# http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
if svn_status == "M":
diffData = GenerateMergeDiff(diffData, args)
elif svn_status == "A":
diffData = GenerateAddDiff(diffData)
elif svn_status == "D":
diffData = GenerateDeleteDiff(diffData)
else:
ErrorExit("Unknown file action %s (svn action %s)." % \
(action, svn_status))
svndiff += GenerateDiffHeader(diffData)
for line in diffData.file_body.splitlines():
svndiff.append(diffData.prefix + line)
filecount += 1
if not filecount:
ErrorExit("No valid patches found in output from p4 diff")
return "\n".join(svndiff) + "\n"
def PerforceActionToSvnStatus(self, status):
# Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
# Is there something more official?
return {
"add" : "A",
"branch" : "A",
"delete" : "D",
"edit" : "M", # Also includes changing file types.
"integrate" : "M",
"move/add" : "M",
"move/delete": "SKIP",
"purge" : "D", # How does a file's status become "purge"?
}[status]
def GetAction(self, filename):
changed_files = self.GetChangedFiles()
if not filename in changed_files:
ErrorExit("Trying to get base version of unknown file %s." % filename)
return changed_files[filename]
def GetBaseFile(self, filename):
base_filename = self.GetBaseFilename(filename)
base_content = ""
new_content = None
status = self.PerforceActionToSvnStatus(self.GetAction(filename))
if status != "A":
revision = self.GetBaseRevision(base_filename)
if not revision:
ErrorExit("Couldn't find base revision for file %s" % filename)
is_base_binary = self.IsBaseBinary(base_filename)
base_content = self.GetFileContent(base_filename,
revision,
is_base_binary)
is_binary = self.IsPendingBinary(filename)
if status != "D" and status != "SKIP":
relpath = self.GetLocalFilename(filename)
if is_binary:
new_content = open(relpath, "rb").read()
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
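# Illustrative usage of SplitPatch (sketch, not part of the original script);
# the diff text below is made up.
def _example_split_patch():
  data = ("Index: a.py\n@@ -1 +1 @@\n-x\n+y\n"
          "Index: b.py\n@@ -1 +1 @@\n-p\n+q\n")
  # Returns [("a.py", <diff for a.py>), ("b.py", <diff for b.py>)].
  return SplitPatch(data)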
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCSName(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns a string indicating which VCS was detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
VCS_CVS, or VCS_UNKNOWN.
Since local perforce repositories can't be easily detected, this method
will only guess VCS_PERFORCE if any perforce options have been specified.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
for attribute, value in options.__dict__.iteritems():
if attribute.startswith("p4") and value != None:
return (VCS_PERFORCE, None)
def RunDetectCommand(vcs_type, command):
"""Helper to detect VCS by executing command.
Returns:
A pair (vcs, output) or None. Throws exception on error.
"""
try:
out, returncode = RunShellWithReturnCode(command)
if returncode == 0:
return (vcs_type, out.strip())
except OSError, (errcode, message):
if errcode != errno.ENOENT: # command not found code
raise
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
if res != None:
return res
# Subversion from 1.7 has a single centralized .svn folder
# ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )
# That's why we use 'svn info' instead of checking for .svn dir
res = RunDetectCommand(VCS_SUBVERSION, ["svn", "info"])
if res != None:
return res
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
"--is-inside-work-tree"])
if res != None:
return res
# Detect CVS repositories: "cvs status" exits with status 0 inside a working copy.
res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
if res != None:
return res
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName(options)
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_PERFORCE:
return PerforceVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
elif vcs == VCS_CVS:
return CVSVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
A dictionary whose key-value pair corresponds the [auto-props] section's
key-value pair.
In the following cases, an empty dictionary is returned:
- the config file doesn't exist, or
- 'enable-auto-props' is not set to a true value in [miscellany].
"""
if os.name == 'nt':
subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
else:
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
# If no value is given, use '*' per Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
A string like 'Property changes on |filename| ...' if given |filename|
matches any entries in [auto-props] section. None, otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
options, args = parser.parse_args(argv[1:])
if options.help:
if options.verbose < 2:
# hide Perforce options
parser.epilog = (
"Use '--help -v' to show additional Perforce options. "
"For more help, see "
"http://code.google.com/p/rietveld/wiki/CodeReviewHelp"
)
parser.option_groups.remove(parser.get_option_group('--p4_port'))
parser.print_help()
sys.exit(0)
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
if options.print_diffs:
print "Rietveld diff start:*****"
print data
print "Rietveld diff end:*****"
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.use_oauth2:
options.save_cookies = False
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type,
options.use_oauth2,
options.oauth2_port,
options.open_oauth2_local_webbrowser)
form_fields = []
repo_guid = vcs.GetGUID()
if repo_guid:
form_fields.append(("repo_guid", repo_guid))
if base:
b = urlparse.urlparse(base)
username, netloc = urllib.splituser(b.netloc)
if username:
logging.info("Removed username from base URL")
base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
b.query, b.fragment))
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
# Process --message, --title and --file.
message = options.message or ""
title = options.title or ""
if options.file:
if options.message:
ErrorExit("Can't specify both message and message file options")
file = open(options.file, 'r')
message = file.read()
file.close()
if options.issue:
prompt = "Title describing this patch set: "
else:
prompt = "New issue subject: "
title = (
title or message.split('\n', 1)[0].strip() or raw_input(prompt).strip())
if not title and not options.issue:
ErrorExit("A non-empty title is required for a new issue")
# For existing issues, it's fine to give a patchset an empty name. Rietveld
# doesn't accept that, so use a single space instead.
title = title or " "
if len(title) > 100:
title = title[:99] + '…'
if title and not options.issue:
message = message or title
form_fields.append(("subject", title))
# If it's a new issue send message as description. Otherwise a new
# message is created below on upload_complete.
if message and not options.issue:
form_fields.append(("description", message))
# Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
if options.send_patch:
options.send_mail = True
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
payload = {} # payload for final request
if options.send_mail:
payload["send_mail"] = "yes"
if options.send_patch:
payload["attach_patch"] = "yes"
if options.issue and message:
payload["message"] = message
payload = urllib.urlencode(payload)
rpc_server.Send("/" + issue + "/upload_complete/" + (patchset or ""),
payload=payload)
return issue, patchset
def main():
try:
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| wishabi/caja-1 | tools/upload.py | Python | apache-2.0 | 96,830 | ["VisIt"] | d0edeacea13ba3267505bf8da89b8afc8d0870227aea26641913832246726f71 |
"""Compatibility fixes for older versions of libraries
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
import inspect
from distutils.version import LooseVersion
from math import log
import os
from pathlib import Path
import warnings
import numpy as np
import scipy
from scipy import linalg
from scipy.linalg import LinAlgError
###############################################################################
# Misc
# helpers to get function arguments
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
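# Illustrative sketch (not part of this module) of what _get_args reports:
def _example_get_args():
    def f(a, b=1, *args, **kwargs):
        pass
    # -> (['a', 'b'], (['a', 'b'], ['args']))
    return _get_args(f), _get_args(f, varargs=True)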
def _safe_svd(A, **kwargs):
"""Wrapper to get around the SVD did not converge error of death"""
# Intel has a bug with their GESVD driver:
# https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501
# For SciPy 0.18 and up, we can work around it by using
# lapack_driver='gesvd' instead.
if kwargs.get('overwrite_a', False):
raise ValueError('Cannot set overwrite_a=True with this function')
try:
return linalg.svd(A, **kwargs)
except np.linalg.LinAlgError as exp:
from .utils import warn
if 'lapack_driver' in _get_args(linalg.svd):
warn('SVD error (%s), attempting to use GESVD instead of GESDD'
% (exp,))
return linalg.svd(A, lapack_driver='gesvd', **kwargs)
else:
raise
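# Illustrative usage (sketch, not part of this module): _safe_svd mirrors
# scipy.linalg.svd and only behaves differently when GESDD fails to converge.
def _example_safe_svd():
    A = np.random.RandomState(0).randn(4, 3)
    U, s, Vt = _safe_svd(A, full_matrices=False)
    return U.shape, s.shape, Vt.shape  # -> ((4, 3), (3,), (3, 3))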
###############################################################################
# Backporting nibabel's read_geometry
def _get_read_geometry():
"""Get the geometry reading function."""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel and LooseVersion(nib.__version__) > LooseVersion('2.1.0'):
from nibabel.freesurfer import read_geometry
else:
read_geometry = _read_geometry
return read_geometry
def _read_geometry(filepath, read_metadata=False, read_stamp=False):
"""Backport from nibabel."""
from .surface import _fread3, _fread3_many
volume_info = dict()
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file
nvert = _fread3(fobj)
nquad = _fread3(fobj)
(fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div
coords = coords.reshape(-1, 3)
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
#
# Face splitting follows
#
faces = np.zeros((2 * nquad, 3), dtype=np.int64)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface] = quad[0], quad[1], quad[3]
nface += 1
faces[nface] = quad[2], quad[3], quad[1]
nface += 1
else:
faces[nface] = quad[0], quad[1], quad[2]
nface += 1
faces[nface] = quad[0], quad[2], quad[3]
nface += 1
elif magic == TRIANGLE_MAGIC: # Triangle file
create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8')
fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
if read_metadata:
volume_info = _read_volume_info(fobj)
else:
raise ValueError("File does not appear to be a Freesurfer surface")
coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits
ret = (coords, faces)
if read_metadata:
if len(volume_info) == 0:
warnings.warn('No volume information contained in the file')
ret += (volume_info,)
if read_stamp:
ret += (create_stamp,)
return ret
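# Note (sketch, not part of this module): the quad-splitting rule above turns
# one quad into two triangles, alternating on the parity of the first index:
#   quad (0, 1, 2, 3) (even) -> faces (0, 1, 3) and (2, 3, 1)
#   quad (1, 2, 3, 4) (odd)  -> faces (1, 2, 3) and (1, 3, 4)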
###############################################################################
# Triaging scipy.signal.windows.dpss (1.1)
def tridisolve(d, e, b, overwrite_b=True):
"""Symmetric tridiagonal system solver, from Golub and Van Loan p157.
.. note:: Copied from NiTime.
Parameters
----------
d : ndarray
main diagonal stored in d[:]
e : ndarray
superdiagonal stored in e[:-1]
b : ndarray
RHS vector
Returns
-------
x : ndarray
Solution to Ax = b (if overwrite_b is False). Otherwise solution is
stored in previous RHS vector b
"""
N = len(b)
# work vectors
dw = d.copy()
ew = e.copy()
if overwrite_b:
x = b
else:
x = b.copy()
for k in range(1, N):
# e^(k-1) = e(k-1) / d(k-1)
# d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)
t = ew[k - 1]
ew[k - 1] = t / dw[k - 1]
dw[k] = dw[k] - t * ew[k - 1]
# This iterative solver can fail sometimes. There is probably a
# graceful way to solve this, but it should only be a problem
# in very rare cases. Users of SciPy 1.1+ will never hit this anyway,
# so not worth spending more time figuring out how to do it faster.
if dw[N - 1] == 0:
a = np.diag(d) + np.diag(e[:-1], -1) + np.diag(e[:-1], 1)
x[:] = linalg.solve(a, b)
else:
for k in range(1, N):
x[k] = x[k] - ew[k - 1] * x[k - 1]
if dw[N - 1] != 0:
x[N - 1] = x[N - 1] / dw[N - 1]
for k in range(N - 2, -1, -1):
x[k] = x[k] / dw[k] - ew[k] * x[k + 1]
if not overwrite_b:
return x
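# Illustrative usage (sketch, not part of this module): solve a small
# symmetric tridiagonal system in place with tridisolve.
def _example_tridisolve():
    d = np.array([2., 2., 2.])   # main diagonal
    e = np.array([1., 1., 0.])   # superdiagonal stored in e[:-1]
    x = np.array([1., 0., 1.])   # RHS, overwritten with the solution
    tridisolve(d, e, x)
    A = np.diag(d) + np.diag(e[:-1], 1) + np.diag(e[:-1], -1)
    return np.allclose(A.dot(x), np.array([1., 0., 1.]))  # -> True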
def tridi_inverse_iteration(d, e, w, x0=None, rtol=1e-8):
"""Perform an inverse iteration.
This will find the eigenvector corresponding to the given eigenvalue
in a symmetric tridiagonal system.
.. note:: Copied from NiTime.
Parameters
----------
d : ndarray
main diagonal of the tridiagonal system
e : ndarray
offdiagonal stored in e[:-1]
w : float
eigenvalue of the eigenvector
x0 : ndarray
initial point to start the iteration
rtol : float
tolerance for the norm of the difference of iterates
Returns
-------
x0 : ndarray
The converged eigenvector.
"""
eig_diag = d - w
if x0 is None:
x0 = np.random.randn(len(d))
x_prev = np.zeros_like(x0)
norm_x = np.linalg.norm(x0)
# the eigenvector is unique up to sign change, so iterate
# until || |x^(n)| - |x^(n-1)| ||^2 < rtol
x0 /= norm_x
while np.linalg.norm(np.abs(x0) - np.abs(x_prev)) > rtol:
x_prev = x0.copy()
tridisolve(eig_diag, e, x0)
norm_x = np.linalg.norm(x0)
x0 /= norm_x
return x0
def _dpss(N, half_nbw, Kmax):
"""Compute DPSS windows."""
# here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian
# sequences, or discrete prolate spheroidal sequences (DPSS). Only the
# first K, K = 2NW/dt orders of DPSS will exhibit good spectral
# concentration
# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here I set up an alternative symmetric tri-diagonal eigenvalue
# problem such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
nidx = np.arange(N, dtype='d')
W = float(half_nbw) / N
diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
off_diag = np.zeros_like(nidx)
off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
# put the diagonals in LAPACK "packed" storage
ab = np.zeros((2, N), 'd')
ab[1] = diagonal
ab[0, 1:] = off_diag[:-1]
# only calculate the highest Kmax eigenvalues
w = linalg.eigvals_banded(ab, select='i',
select_range=(N - Kmax, N - 1))
w = w[::-1]
# find the corresponding eigenvectors via inverse iteration
t = np.linspace(0, np.pi, N)
dpss = np.zeros((Kmax, N), 'd')
for k in range(Kmax):
dpss[k] = tridi_inverse_iteration(diagonal, off_diag, w[k],
x0=np.sin((k + 1) * t))
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
for i, f in enumerate(fix_symmetric):
if f:
dpss[2 * i] *= -1
# rather than test the sign of one point, test the sign of the
# linear slope up to the first (largest) peak
pk = np.argmax(np.abs(dpss[1::2, :N // 2]), axis=1)
for i, p in enumerate(pk):
if np.sum(dpss[2 * i + 1, :p]) < 0:
dpss[2 * i + 1] *= -1
return dpss
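# Illustrative usage (sketch, not part of this module): DPSS tapers for a
# 64-sample window with time-bandwidth 2.5, keeping the first 4 orders.
def _example_dpss():
    tapers = _dpss(N=64, half_nbw=2.5, Kmax=4)
    return tapers.shape  # -> (4, 64); each row has unit L2 norm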
def _get_dpss():
try:
from scipy.signal.windows import dpss
except ImportError:
dpss = _dpss
return dpss
###############################################################################
# Triaging FFT functions to get fast pocketfft (SciPy 1.4)
try:
from scipy.fft import fft, ifft, fftfreq, rfft, irfft, rfftfreq, ifftshift
except ImportError:
from numpy.fft import fft, ifft, fftfreq, rfft, irfft, rfftfreq, ifftshift
###############################################################################
# Orth with rcond argument (SciPy 1.1)
if LooseVersion(scipy.__version__) >= '1.1':
from scipy.linalg import orth
else:
def orth(A, rcond=None): # noqa
u, s, vh = linalg.svd(A, full_matrices=False)
M, N = u.shape[0], vh.shape[1]
if rcond is None:
rcond = np.finfo(s.dtype).eps * max(M, N)
tol = np.amax(s) * rcond
num = np.sum(s > tol, dtype=int)
Q = u[:, :num]
return Q
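# Illustrative usage (sketch, not part of this module): orth returns an
# orthonormal basis for the range of A.
def _example_orth():
    A = np.array([[1., 0.], [0., 1.], [1., 1.]])  # rank 2
    Q = orth(A)
    return Q.shape, np.allclose(Q.T.dot(Q), np.eye(2))  # -> ((3, 2), True)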
###############################################################################
# NumPy Generator (NumPy 1.17)
def rng_uniform(rng):
"""Get the unform/randint from the rng."""
# prefer Generator.integers, fall back to RandomState.randint
return getattr(rng, 'integers', getattr(rng, 'randint', None))
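# Illustrative sketch (not part of this module): rng_uniform returns whichever
# integer-draw method the rng provides (NumPy 1.17+ Generator vs RandomState).
# rng_uniform(np.random.RandomState(0))  # -> bound "randint" method
# rng_uniform(np.random.default_rng(0))  # -> bound "integers" method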
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
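# Illustrative usage (sketch, not part of this module): one biquad section in
# the (n_sections, 6) layout expected by _validate_sos; coefficients made up.
def _example_validate_sos():
    sos = [1., 2., 1., 1., -0.5, 0.25]  # b0 b1 b2 a0 a1 a2, with a0 == 1
    validated, n_sections = _validate_sos(sos)
    return validated.shape, n_sections  # -> ((1, 6), 1)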
###############################################################################
# Misc utilities
# Deal with nibabel 2.5 img.get_data() deprecation
def _get_img_fdata(img):
data = np.asanyarray(img.dataobj)
dtype = np.complex128 if np.iscomplexobj(data) else np.float64
return data.astype(dtype)
def _read_volume_info(fobj):
"""An implementation of nibabel.freesurfer.io._read_volume_info, since old
versions of nibabel (<=2.1.0) don't have it.
"""
volume_info = dict()
head = np.fromfile(fobj, '>i4', 1)
if not np.array_equal(head, [20]):  # Read two more int32 values
head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
if not np.array_equal(head, [2, 0, 20]):
warnings.warn("Unknown extension code.")
return volume_info
volume_info['head'] = head
for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']:
pair = fobj.readline().decode('utf-8').split('=')
if pair[0].strip() != key or len(pair) != 2:
raise IOError('Error parsing volume info.')
if key in ('valid', 'filename'):
volume_info[key] = pair[1].strip()
elif key == 'volume':
volume_info[key] = np.array(pair[1].split()).astype(int)
else:
volume_info[key] = np.array(pair[1].split()).astype(float)
# Ignore the rest
return volume_info
def _serialize_volume_info(volume_info):
"""An implementation of nibabel.freesurfer.io._serialize_volume_info, since
old versions of nibabel (<=2.1.0) don't have it."""
keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']
diff = set(volume_info.keys()).difference(keys)
if len(diff) > 0:
raise ValueError('Invalid volume info: %s.' % diff.pop())
strings = list()
for key in keys:
if key == 'head':
if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
volume_info[key], [2, 0, 20])):
warnings.warn("Unknown extension code.")
strings.append(np.array(volume_info[key], dtype='>i4').tobytes())
elif key in ('valid', 'filename'):
val = volume_info[key]
strings.append('{} = {}\n'.format(key, val).encode('utf-8'))
elif key == 'volume':
val = volume_info[key]
strings.append('{} = {} {} {}\n'.format(
key, val[0], val[1], val[2]).encode('utf-8'))
else:
val = volume_info[key]
strings.append('{} = {:0.10g} {:0.10g} {:0.10g}\n'.format(
key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
return b''.join(strings)
##############################################################################
# adapted from scikit-learn
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
from sklearn.base import _pprint
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
# __getstate__ and __setstate__ are omitted because they only contain
# conditionals that are not satisfied by our objects (e.g.,
# ``if type(self).__module__.startswith('sklearn.')``.
# newer sklearn deprecates importing from sklearn.metrics.scoring,
# but older sklearn does not expose check_scoring in sklearn.metrics.
def _get_check_scoring():
try:
from sklearn.metrics import check_scoring # noqa
except ImportError:
from sklearn.metrics.scorer import check_scoring # noqa
return check_scoring
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as
`X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support
indexing.
"""
try:
from sklearn.utils.validation import \
_check_fit_params as _sklearn_check_fit_params
return _sklearn_check_fit_params(X, fit_params, indices)
except ImportError:
from sklearn.model_selection import _validation
fit_params_validated = \
{k: _validation._index_param_value(X, v, indices)
for k, v in fit_params.items()}
return fit_params_validated
###############################################################################
# Copied from sklearn to simplify code paths
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
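# Illustrative usage (sketch, not part of this module): the MLE covariance of
# a tiny, already-centered data set.
def _example_empirical_covariance():
    X = np.array([[1., -1.], [-1., 1.]])
    return empirical_covariance(X, assume_centered=True)
    # -> array([[ 1., -1.], [-1.,  1.]])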
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done according to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
# covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like,
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
# X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
y : not used, present for API consistency purposes.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A)))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
The observations for which we compute the Mahalanobis
distances. Observations are assumed to be drawn from the same
distribution as the data used in fit.
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
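# Hedged usage sketch for ``EmpiricalCovariance`` (the helper name is ours):
# fit on training data, then score new observations by squared Mahalanobis
# distance.
def _demo_empirical_covariance_estimator():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    est = EmpiricalCovariance().fit(X)
    d2 = est.mahalanobis(X[:5])
    assert d2.shape == (5,) and np.all(d2 >= 0)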
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
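# Hedged numerical check (illustrative): for centered Gaussian data the value
# above equals the per-sample average log-density under N(0, C), which we can
# compare with ``scipy.stats.multivariate_normal`` (assumes ``linalg`` is
# scipy's, as elsewhere in this module).
def _demo_log_likelihood():
    import numpy as np
    from scipy import stats
    rng = np.random.RandomState(0)
    X = rng.randn(500, 2)
    X -= X.mean(0)
    emp_cov = empirical_covariance(X, assume_centered=True)
    res = log_likelihood(emp_cov, linalg.pinvh(emp_cov))
    ref = stats.multivariate_normal(np.zeros(2), emp_cov).logpdf(X).mean()
    assert np.allclose(res, ref)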
# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues
def _logdet(A):
"""Compute the log det of a positive semidefinite matrix."""
vals = linalg.eigvalsh(A)
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
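# Hedged check (illustrative): on a strictly positive definite matrix the
# clipped log-determinant agrees with ``np.linalg.slogdet``.
def _demo_logdet():
    import numpy as np
    A = np.array([[2., 0.5], [0.5, 1.]])
    sign, ref = np.linalg.slogdet(A)
    assert sign > 0 and np.isclose(_logdet(A), ref)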
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
def _assess_dimension_(spectrum, rank, n_samples, n_features):
from scipy.special import gammaln
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
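# Hedged demo (illustrative): for data with one dominant direction plus small
# isotropic noise, the inferred dimension from the covariance spectrum should
# be 1.
def _demo_infer_dimension():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(500, 1) @ rng.randn(1, 5) + 1e-2 * rng.randn(500, 5)
    spectrum = np.linalg.eigvalsh(np.cov(X.T))[::-1]
    assert _infer_dimension_(spectrum, n_samples=500, n_features=5) == 1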
def svd_flip(u, v, u_based_decision=True):
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
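# Hedged demo (illustrative): ``svd_flip`` makes the SVD sign convention
# deterministic while preserving the reconstruction of the input.
def _demo_svd_flip():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(4, 3)
    u, s, v = np.linalg.svd(a, full_matrices=False)
    u2, v2 = svd_flip(u.copy(), v.copy())
    assert np.allclose((u2 * s) @ v2, a)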
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
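# Hedged usage sketch (illustrative): same values as ``np.cumsum`` on benign
# input, but accumulated in float64 with a final consistency check.
def _demo_stable_cumsum():
    import numpy as np
    arr = np.full(10, 0.1, dtype=np.float32)
    out = stable_cumsum(arr)
    assert out.dtype == np.float64 and np.isclose(out[-1], 1.0)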
# This shim can be removed once NumPy 1.19.0+ is required (1.18.4 has sign bug)
def svd(a, hermitian=False):
if hermitian: # faster
s, u = np.linalg.eigh(a)
sgn = np.sign(s)
s = np.abs(s)
sidx = np.argsort(s)[..., ::-1]
sgn = take_along_axis(sgn, sidx, axis=-1)
s = take_along_axis(s, sidx, axis=-1)
u = take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = (u * sgn[..., np.newaxis, :]).swapaxes(-2, -1).conj()
np.abs(s, out=s)
return u, s, vt
else:
return np.linalg.svd(a)
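# Hedged check (illustrative): the hermitian fast path agrees with the
# general SVD on a symmetric matrix.
def _demo_svd_hermitian():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(4, 4)
    a = a + a.T
    u, s, vt = svd(a, hermitian=True)
    assert np.allclose(u @ np.diag(s) @ vt, a)
    assert np.allclose(s, np.linalg.svd(a)[1])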
###############################################################################
# NumPy einsum backward compat (allow "optimize" arg and fix 1.14.0 bug)
# XXX eventually we should hand-tune our `einsum` calls given our array sizes!
def einsum(*args, **kwargs):
if 'optimize' not in kwargs:
kwargs['optimize'] = False
return np.einsum(*args, **kwargs)
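# Hedged check (illustrative): the wrapper behaves like ``np.einsum`` with
# optimization off by default.
def _demo_einsum():
    import numpy as np
    a = np.arange(6.).reshape(2, 3)
    assert np.allclose(einsum('ij->i', a), a.sum(axis=1))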
try:
from numpy import take_along_axis
except ImportError: # NumPy < 1.15
def take_along_axis(arr, indices, axis):
# normalize inputs
if axis is None:
arr = arr.flat
arr_shape = (len(arr),) # flatiter has no .shape
axis = 0
else:
# there is a NumPy function for this, but rather than copying it,
# assume our internal uses pass valid axes and just normalize quickly
if axis < 0:
axis += arr.ndim
assert 0 <= axis < arr.ndim
arr_shape = arr.shape
# use the fancy index
return arr[_make_along_axis_idx(arr_shape, indices, axis)]
def _make_along_axis_idx(arr_shape, indices, axis):
# compute dimensions to iterate over
if not np.issubdtype(indices.dtype, np.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
raise ValueError(
"`indices` and `arr` must have the same number of dimensions")
shape_ones = (1,) * indices.ndim
dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
# build a fancy index, consisting of orthogonal aranges, with the
# requested index inserted at the right location
fancy_index = []
for dim, n in zip(dest_dims, arr_shape):
if dim is None:
fancy_index.append(indices)
else:
ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
fancy_index.append(np.arange(n).reshape(ind_shape))
return tuple(fancy_index)
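# Hedged demo (illustrative): whichever branch provided ``take_along_axis``,
# it matches sorting each row with indices from ``np.argsort``.
def _demo_take_along_axis():
    import numpy as np
    arr = np.array([[3, 1, 2], [6, 5, 4]])
    idx = np.argsort(arr, axis=1)
    assert np.array_equal(take_along_axis(arr, idx, axis=1),
                          np.sort(arr, axis=1))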
###############################################################################
# From nilearn
def _crop_colorbar(cbar, cbar_vmin, cbar_vmax):
"""
Crop a colorbar to show values from cbar_vmin to cbar_vmax.
Used when ``symmetric_cbar=False``.
"""
if (cbar_vmin is None) and (cbar_vmax is None):
return
cbar_tick_locs = cbar.locator.locs
if cbar_vmax is None:
cbar_vmax = cbar_tick_locs.max()
if cbar_vmin is None:
cbar_vmin = cbar_tick_locs.min()
new_tick_locs = np.linspace(cbar_vmin, cbar_vmax,
len(cbar_tick_locs))
cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax))
outline = cbar.outline.get_xy()
outline[:2, 1] += cbar.norm(cbar_vmin)
outline[2:6, 1] -= (1. - cbar.norm(cbar_vmax))
outline[6:, 1] += cbar.norm(cbar_vmin)
cbar.outline.set_xy(outline)
cbar.set_ticks(new_tick_locs, update_ticks=True)
###############################################################################
# Matplotlib
def _get_status(checks):
"""Deal with old MPL to get check box statuses."""
try:
return list(checks.get_status())
except AttributeError:
return [x[0].get_visible() for x in checks.lines]
###############################################################################
# Numba (optional requirement)
# Here we choose different defaults to speed things up by default
try:
import numba
if LooseVersion(numba.__version__) < LooseVersion('0.40'):
raise ImportError
prange = numba.prange
def jit(nopython=True, nogil=True, fastmath=True, cache=True,
**kwargs): # noqa
return numba.jit(nopython=nopython, nogil=nogil, fastmath=fastmath,
cache=cache, **kwargs)
except ImportError:
has_numba = False
else:
has_numba = (os.getenv('MNE_USE_NUMBA', 'true').lower() == 'true')
if not has_numba:
def jit(**kwargs): # noqa
def _jit(func):
return func
return _jit
prange = range
bincount = np.bincount
mean = np.mean
else:
@jit()
def bincount(x, weights, minlength): # noqa: D103
out = np.zeros(minlength)
for idx, w in zip(x, weights):
out[idx] += w
return out
# fix because Numba does not support axis kwarg for mean
@jit()
def _np_apply_along_axis(func1d, axis, arr):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@jit()
def mean(array, axis):
return _np_apply_along_axis(np.mean, axis, array)
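# Hedged usage sketch (illustrative): both the numba and pure-NumPy code
# paths expose ``bincount`` and ``mean`` with the signatures used below.
def _demo_numba_shims():
    import numpy as np
    x = np.array([0, 1, 1, 2])
    out = bincount(x, weights=np.ones(4), minlength=4)
    assert np.array_equal(out, [1., 2., 1., 0.])
    assert np.allclose(mean(np.arange(6.).reshape(2, 3), axis=0),
                       [1.5, 2.5, 3.5])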
###############################################################################
# Added in Python 3.7 (remove when we drop support for 3.6)
try:
from contextlib import nullcontext
except ImportError:
from contextlib import contextmanager
@contextmanager
def nullcontext(enter_result=None):
yield enter_result
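# Hedged usage sketch (illustrative): ``nullcontext`` stands in wherever a
# real context manager is optional.
def _demo_nullcontext():
    with nullcontext(42) as value:
        assert value == 42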
|
cjayb/mne-python
|
mne/fixes.py
|
Python
|
bsd-3-clause
| 40,148
|
[
"Gaussian",
"Mayavi"
] |
a6fd54296ee85e04016526b1f1f1ad837242963b7e26ff5bad76cff0b0065238
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
import re
from math import isinf, isnan
from future.utils import text_type, binary_type, iteritems
from ._compat import singledispatch
from .nodes import * # noqa
@singledispatch
def node_encoder(obj): # noqa
"""Convert python object to node tree."""
raise RuntimeError('Type %s not supported' % type(obj))
@node_encoder.register(dict)
def _(obj):
items = []
for key, value in iteritems(obj):
items.append((node_encoder(key), node_encoder(value)))
return Map(*items)
@node_encoder.register(list) # noqa
def _(obj):
items = []
for item in obj:
items.append(node_encoder(item))
return Sequence(*items)
@node_encoder.register(binary_type) # noqa
def _(obj):
try:
obj = text_type(obj, 'ascii')
return Str(obj)
except UnicodeDecodeError:
return Binary.from_decoded(obj)
@node_encoder.register(text_type) # noqa
def _(obj):
try:
obj.encode('ascii')
return Str(obj)
except UnicodeEncodeError:
obj = binary_type(obj, encoding='utf-8')
return Binary.from_decoded(obj)
@node_encoder.register(bool) # noqa
def _(obj):
return Bool(obj)
@node_encoder.register(int) # noqa
def _(obj):
return Int(obj)
@node_encoder.register(float) # noqa
def _(obj):
return Float(obj)
@node_encoder.register(type(None)) # noqa
def _(obj):
return Null(obj)
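# Hedged usage sketch (illustrative, the helper name is ours): the dispatcher
# above converts plain Python containers into the node types from ``.nodes``.
def _demo_node_encoder():
    assert isinstance(node_encoder({'answer': 42}), Map)
    assert isinstance(node_encoder([1.5, None, True]), Sequence)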
class SYMBOL:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
INDENT = SYMBOL('INDENT')
DEDENT = SYMBOL('DEDENT')
# noinspection PyMethodMayBeStatic
class YAMLEncoder(NodeVisitor):
"""Convert node tree into string."""
stack = []
def __init__(self, indent=None, sort_keys=None, **kw):
super(YAMLEncoder, self).__init__(**kw)
self.indent = indent or 2
self.sort_keys = sort_keys or False
def encode(self, obj):
lines = ''.join(line for line in self.iterencode(obj))
return lines
def iterencode(self, obj):
stack = []
for chunk in self._encode(obj):
stack.append(chunk)
if not chunk.endswith('\n'):
continue
yield ''.join(stack)
stack = []
yield ''.join(stack)
def _encode(self, obj): # noqa
indent_depth = 0
nodes = node_encoder(obj)
items = self.visit(nodes)
items = iter(items)
next_item = next(items)
while True:
try:
current_item, next_item = next_item, next(items)
if next_item == '\n':
current_item = current_item.rstrip(' ')
if next_item is INDENT:
indent_depth += 1
next_item = current_item
continue
if next_item is DEDENT:
indent_depth -= 1
next_item = current_item
continue
indent_spaces = ''.ljust(indent_depth * self.indent)
current_item = current_item.replace('\n', '\n{0}'.format(indent_spaces))
yield current_item
except StopIteration:
yield next_item
if next_item != '\n':
yield '\n'
if not isinstance(nodes, (Collection, Str)):
yield '...\n'
break
def visit_Sequence(self, node):
stack = []
for child in node:
stack.append('-'.ljust(self.indent))
item = (yield child)
if not isinstance(item, list):
stack.append(item)
stack.append('\n')
else:
iter_items = iter(item)
while True:
next_item = next(iter_items)
stack.append(next_item)
if next_item == '\n':
break
stack.append(INDENT)
stack.extend([next_item for next_item in iter_items])
stack.append(DEDENT)
yield stack
def iter_map_items(self, node):
if not isinstance(node, Map):
raise TypeError('Expecting %r, got %r' % (Map, type(node)))
if self.sort_keys is False:
for k, v in iteritems(node):
yield k, v
else:
for k in iter(sorted(node)):
yield k, node[k]
def visit_Map(self, node):
stack = []
for k, v in self.iter_map_items(node):
key, value = (yield k), (yield v)
is_oneliner = not isinstance(key, list) and not isinstance(value, list)
is_compact_key = isinstance(v, Scalar) and isinstance(value, list)
is_complex_key = isinstance(key, list)
if is_oneliner:
stack.append((yield key))
stack.append(': ')
stack.append((yield value))
stack.append('\n')
elif is_compact_key:
stack.append((yield key))
stack.append(': ')
stack.extend(value)
stack.append('\n')
elif not is_complex_key:
stack.append((yield key))
stack.append(': ')
stack.append('\n')
if isinstance(v, Sequence):
# special case, Map value -> Sequence has optional indent.
stack.extend(value)
else:
stack.append(INDENT)
stack.extend(value)
stack.append(DEDENT)
yield stack
def visit_Scalar(self, node):
return repr(node.value)
def visit_Str(self, node):
value = text_type(node.value)
if not value:
return '""'
use_repr = any([ # :off
value.lower() in ['yes', 'no', 'true', 'false'],
value.isnumeric(),
is_float(value)
]) # :on
method = repr if use_repr else str
if value.endswith('\n') and '\n' in value[:-1]:
stack = ['|\n', INDENT]
stack.extend(method(node.value).splitlines(True))
stack.append(DEDENT)
return stack
if value.endswith('\n'):
return ['>\n', INDENT, method(node.value), DEDENT]
return method(node.value)
def visit_Bool(self, node):
return self.visit_Scalar(node).lower()
visit_Int = visit_Scalar
def visit_Float(self, node):
if isnan(node.value):
return '.nan'
if isinf(node.value):
return repr(node.value).replace('inf', '.inf')
return repr(node.value)
# noinspection PyUnusedLocal
def visit_Null(self, node):
return 'null'
def visit_Binary(self, node):
stack = ['!!binary |\n', INDENT]
stack.extend(node.raw_value.splitlines(True))
stack.append(DEDENT)
return stack
re_float = re.compile(r'[+-]?(?:\d*\.\d+|\d+\.\d)')
def is_float(string):
return not not re_float.match(string)
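# Hedged usage sketch (illustrative): encoding a plain mapping end to end,
# assuming the ``NodeVisitor`` machinery behaves as in the released package.
def _demo_yaml_encoder():
    text = YAMLEncoder().encode({'name': 'demo', 'count': 3})
    assert isinstance(text, str) and 'count' in text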
|
bionikspoon/pureyaml
|
pureyaml/encoder.py
|
Python
|
mit
| 7,172
|
[
"VisIt"
] |
f0e1bf3de0bac1e5f3473dd849002cd4d222beca5249cea461c1d23489f2dc85
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each distribution's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
.. note:: Deprecated in 2.4.0. It will be removed in 3.0.0. Use ClusteringEvaluator instead.
You can also get the cost on the training dataset in the summary.
"""
warnings.warn("Deprecated in 2.4.0. It will be removed in 3.0.0. Use ClusteringEvaluator "
"instead. You can also get the cost on the training dataset in the summary.",
DeprecationWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
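# Hedged sketch (illustrative, ours): the ``ClusteringEvaluator`` replacement
# suggested in ``KMeansModel.computeCost`` above, for a fitted clustering
# ``model`` and a DataFrame ``df`` of features.
def _demo_clustering_evaluator(model, df):
    from pyspark.ml.evaluation import ClusteringEvaluator
    # evaluate() computes the silhouette score of the predictions by default
    return ClusteringEvaluator().evaluate(model.transform(df))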
@inherit_doc
class KMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol, HasMaxIter,
HasTol, HasSeed, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
2.000...
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure`
"""
return self.getOrDefault(self.distanceMeasure)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol,
HasMaxIter, HasSeed, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure` or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
pass
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
This abstraction permits different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
:return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
Currently only supports 'em' and 'online'.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.icml2010.org/papers/387.pdf>`_. From the abstract:
PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. seealso:: `Wikipedia on Spectral clustering \
<http://en.wikipedia.org/wiki/Spectral_clustering>`_
>>> data = [(1, 0, 0.5), \
(2, 0, 0.5), (2, 1, 0.7), \
(3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9), \
(4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1), \
(5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight")
>>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |1 |
|1 |1 |
|2 |1 |
|3 |1 |
|4 |1 |
|5 |0 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
.. versionadded:: 2.4.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and return a cluster assignment for each input vertex.
:param dataset:
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
the dst column value is j, the weight column value is similarity s_ij
which must be nonnegative. This is a symmetric matrix and hence
s_ij = s_ji. For any (i, j) with nonzero similarity, there should be
either (i, j, s_ij) or (j, i, s_ji) in the input. Rows with i = j are
ignored, because we assume s_ij = 0.0.
:return:
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
# Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
eyalfa/spark
|
python/pyspark/ml/clustering.py
|
Python
|
apache-2.0
| 50,292
|
[
"Gaussian"
] |
be19fe278a5288956a36ec3a0d7126fa22a7783db276167b6b2d831e199416b2
|